# mcp-server-tree-sitter

This is page 3 of 4 of a generated codebase export. Use http://codebase.md/wrale/mcp-server-tree-sitter?page={x} to view the full context.

# Directory Structure

```
├── .codestateignore
├── .github
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── .python-version
├── CONTRIBUTING.md
├── docs
│   ├── architecture.md
│   ├── cli.md
│   ├── config.md
│   ├── diagnostics.md
│   ├── logging.md
│   ├── requirements
│   │   └── logging.md
│   └── tree-sitter-type-safety.md
├── FEATURES.md
├── LICENSE
├── Makefile
├── NOTICE
├── pyproject.toml
├── README.md
├── ROADMAP.md
├── scripts
│   └── implementation-search.sh
├── src
│   └── mcp_server_tree_sitter
│       ├── __init__.py
│       ├── __main__.py
│       ├── api.py
│       ├── bootstrap
│       │   ├── __init__.py
│       │   └── logging_bootstrap.py
│       ├── cache
│       │   ├── __init__.py
│       │   └── parser_cache.py
│       ├── capabilities
│       │   ├── __init__.py
│       │   └── server_capabilities.py
│       ├── config.py
│       ├── context.py
│       ├── di.py
│       ├── exceptions.py
│       ├── language
│       │   ├── __init__.py
│       │   ├── query_templates.py
│       │   ├── registry.py
│       │   └── templates
│       │       ├── __init__.py
│       │       ├── apl.py
│       │       ├── c.py
│       │       ├── cpp.py
│       │       ├── go.py
│       │       ├── java.py
│       │       ├── javascript.py
│       │       ├── julia.py
│       │       ├── kotlin.py
│       │       ├── python.py
│       │       ├── rust.py
│       │       ├── swift.py
│       │       └── typescript.py
│       ├── logging_config.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── ast_cursor.py
│       │   ├── ast.py
│       │   └── project.py
│       ├── prompts
│       │   ├── __init__.py
│       │   └── code_patterns.py
│       ├── server.py
│       ├── testing
│       │   ├── __init__.py
│       │   └── pytest_diagnostic.py
│       ├── tools
│       │   ├── __init__.py
│       │   ├── analysis.py
│       │   ├── ast_operations.py
│       │   ├── debug.py
│       │   ├── file_operations.py
│       │   ├── project.py
│       │   ├── query_builder.py
│       │   ├── registration.py
│       │   └── search.py
│       └── utils
│           ├── __init__.py
│           ├── context
│           │   ├── __init__.py
│           │   └── mcp_context.py
│           ├── file_io.py
│           ├── path.py
│           ├── security.py
│           ├── tree_sitter_helpers.py
│           └── tree_sitter_types.py
├── tests
│   ├── __init__.py
│   ├── .gitignore
│   ├── conftest.py
│   ├── test_ast_cursor.py
│   ├── test_basic.py
│   ├── test_cache_config.py
│   ├── test_cli_arguments.py
│   ├── test_config_behavior.py
│   ├── test_config_manager.py
│   ├── test_context.py
│   ├── test_debug_flag.py
│   ├── test_di.py
│   ├── test_diagnostics
│   │   ├── __init__.py
│   │   ├── test_ast_parsing.py
│   │   ├── test_ast.py
│   │   ├── test_cursor_ast.py
│   │   ├── test_language_pack.py
│   │   ├── test_language_registry.py
│   │   └── test_unpacking_errors.py
│   ├── test_env_config.py
│   ├── test_failure_modes.py
│   ├── test_file_operations.py
│   ├── test_helpers.py
│   ├── test_language_listing.py
│   ├── test_logging_bootstrap.py
│   ├── test_logging_config_di.py
│   ├── test_logging_config.py
│   ├── test_logging_early_init.py
│   ├── test_logging_env_vars.py
│   ├── test_logging_handlers.py
│   ├── test_makefile_targets.py
│   ├── test_mcp_context.py
│   ├── test_models_ast.py
│   ├── test_persistent_server.py
│   ├── test_project_persistence.py
│   ├── test_query_result_handling.py
│   ├── test_registration.py
│   ├── test_rust_compatibility.py
│   ├── test_server_capabilities.py
│   ├── test_server.py
│   ├── test_symbol_extraction.py
│   ├── test_tree_sitter_helpers.py
│   ├── test_yaml_config_di.py
│   └── test_yaml_config.py
├── TODO.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/tests/test_file_operations.py:
--------------------------------------------------------------------------------

```python
"""Tests for file_operations.py module."""

import tempfile
from pathlib import Path
from typing import Any, Dict, Generator

import pytest

from mcp_server_tree_sitter.exceptions import FileAccessError
from mcp_server_tree_sitter.tools.file_operations import (
    count_lines,
    get_file_content,
    get_file_info,
    list_project_files,
)
from tests.test_helpers import register_project_tool


@pytest.fixture
def test_project() -> Generator[Dict[str, Any], None, None]:
    """Create a temporary test project with various file types."""
    with tempfile.TemporaryDirectory() as temp_dir:
        project_path = Path(temp_dir)

        # Create different file types
        # Python file
        python_file = project_path / "test.py"
        with open(python_file, "w") as f:
            f.write("def hello():\n    print('Hello, world!')\n\nhello()\n")

        # Text file
        text_file = project_path / "readme.txt"
        with open(text_file, "w") as f:
            f.write("This is a readme file.\nIt has multiple lines.\n")

        # Empty file
        empty_file = project_path / "empty.md"
        empty_file.touch()

        # Nested directory structure
        nested_dir = project_path / "nested"
        nested_dir.mkdir()
        nested_file = nested_dir / "nested.py"
        with open(nested_file, "w") as f:
            f.write("# A nested Python file\n")

        # A large file
        large_file = project_path / "large.log"
        with open(large_file, "w") as f:
            f.write("Line " + "x" * 100 + "\n" * 1000)  # 1000 lines with 100+ chars each

        # A hidden file and directory
        hidden_dir = project_path / ".hidden"
        hidden_dir.mkdir()
        hidden_file = hidden_dir / "hidden.txt"
        with open(hidden_file, "w") as f:
            f.write("This is a hidden file.\n")

        # Register the project
        project_name = "file_operations_test"
        try:
            register_project_tool(path=str(project_path), name=project_name)
        except Exception:
            # If registration fails, try with a more unique name
            import time

            project_name = f"file_operations_test_{int(time.time())}"
            register_project_tool(path=str(project_path), name=project_name)

        yield {
            "name": project_name,
            "path": str(project_path),
            "files": {
                "python": "test.py",
                "text": "readme.txt",
                "empty": "empty.md",
                "nested": "nested/nested.py",
                "large": "large.log",
                "hidden_dir": ".hidden",
                "hidden_file": ".hidden/hidden.txt",
            },
        }


# Test list_project_files function
def test_list_project_files_basic(test_project):
    """Test basic functionality of list_project_files."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # List all files
    files = list_project_files(project)

    # Verify basic files are listed
    assert test_project["files"]["python"] in files
    assert test_project["files"]["text"] in files
    assert test_project["files"]["empty"] in files
    assert test_project["files"]["nested"] in files


def test_list_project_files_with_pattern(test_project):
    """Test list_project_files with a glob pattern."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # List files with pattern
    python_files = list_project_files(project, pattern="**/*.py")

    # Verify only Python files are listed
    assert test_project["files"]["python"] in python_files
    assert test_project["files"]["nested"] in python_files
    assert test_project["files"]["text"] not in python_files
    assert test_project["files"]["empty"] not in python_files


def test_list_project_files_with_max_depth(test_project):
    """Test list_project_files with max_depth parameter."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # List files with max_depth=0 (only files in root)
    root_files = list_project_files(project, max_depth=0)

    # Verify only root files are listed
    assert test_project["files"]["python"] in root_files
    assert test_project["files"]["text"] in root_files
    assert test_project["files"]["empty"] in root_files
    assert test_project["files"]["nested"] not in root_files


def test_list_project_files_with_extensions(test_project):
    """Test list_project_files with extension filtering."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # List files with specific extensions
    md_files = list_project_files(project, filter_extensions=["md"])
    text_files = list_project_files(project, filter_extensions=["txt"])
    code_files = list_project_files(project, filter_extensions=["py"])

    # Verify correct filtering
    assert test_project["files"]["empty"] in md_files
    assert test_project["files"]["text"] in text_files
    assert test_project["files"]["python"] in code_files
    assert test_project["files"]["nested"] in code_files

    # Verify no cross-contamination
    assert test_project["files"]["python"] not in md_files
    assert test_project["files"]["text"] not in code_files


# Test get_file_content function
def test_get_file_content_basic(test_project):
    """Test basic functionality of get_file_content."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get content of Python file
    content = get_file_content(project, test_project["files"]["python"])

    # Verify content
    assert "def hello()" in content
    assert "print('Hello, world!')" in content


def test_get_file_content_empty(test_project):
    """Test get_file_content with an empty file."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get content of empty file
    content = get_file_content(project, test_project["files"]["empty"])

    # Verify content is empty
    assert content == ""


def test_get_file_content_with_line_limits(test_project):
    """Test get_file_content with line limiting parameters."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get content with max_lines
    content = get_file_content(project, test_project["files"]["python"], max_lines=2)

    # Verify only first two lines are returned
    assert "def hello()" in content  # Note the space - looking for function definition
    assert "print('Hello, world!')" in content
    assert "\nhello()" not in content  # Look for newline + hello() to find the function call line

    # Get content with start_line
    content = get_file_content(project, test_project["files"]["python"], start_line=2)

    # Verify only lines after start_line are returned
    assert "def hello()" not in content
    assert "hello()" in content


def test_get_file_content_nonexistent_file(test_project):
    """Test get_file_content with a nonexistent file."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Try to get content of a nonexistent file
    with pytest.raises(FileAccessError):
        get_file_content(project, "nonexistent.py")


def test_get_file_content_outside_project(test_project):
    """Test get_file_content with a path outside the project."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Try to get content of a file outside the project
    with pytest.raises(FileAccessError):
        get_file_content(project, "../outside.txt")


def test_get_file_content_as_bytes(test_project):
    """Test get_file_content with as_bytes=True."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get content as bytes
    content = get_file_content(project, test_project["files"]["python"], as_bytes=True)

    # Verify content is bytes
    assert isinstance(content, bytes)
    assert b"def hello()" in content


# Test get_file_info function
def test_get_file_info_basic(test_project):
    """Test basic functionality of get_file_info."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get info for Python file
    info = get_file_info(project, test_project["files"]["python"])

    # Verify info
    assert info["path"] == test_project["files"]["python"]
    assert info["size"] > 0
    assert info["is_directory"] is False
    assert info["extension"] == "py"
    assert info["line_count"] > 0


def test_get_file_info_directory(test_project):
    """Test get_file_info with a directory."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Get info for nested directory
    info = get_file_info(project, "nested")

    # Verify info
    assert info["path"] == "nested"
    assert info["is_directory"] is True
    assert info["line_count"] is None  # Line count should be None for directories


def test_get_file_info_nonexistent_file(test_project):
    """Test get_file_info with a nonexistent file."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Try to get info for a nonexistent file
    with pytest.raises(FileAccessError):
        get_file_info(project, "nonexistent.py")


def test_get_file_info_outside_project(test_project):
    """Test get_file_info with a path outside the project."""
    # Get project object
    from mcp_server_tree_sitter.api import get_project_registry

    project_registry = get_project_registry()
    project = project_registry.get_project(test_project["name"])

    # Try to get info for a file outside the project
    with pytest.raises(FileAccessError):
        get_file_info(project, "../outside.txt")


# Test count_lines function
def test_count_lines(test_project):
    """Test the count_lines function."""
    # Get absolute path to Python file
    python_file_path = Path(test_project["path"]) / test_project["files"]["python"]

    # Count lines
    line_count = count_lines(python_file_path)

    # Verify line count
    assert line_count == 4  # Based on the file content we created


def test_count_lines_empty_file(test_project):
    """Test count_lines with an empty file."""
    # Get absolute path to empty file
    empty_file_path = Path(test_project["path"]) / test_project["files"]["empty"]

    # Count lines
    line_count = count_lines(empty_file_path)

    # Verify line count
    assert line_count == 0


def test_count_lines_large_file(test_project):
    """Test count_lines with a large file."""
    # Get absolute path to large file
    large_file_path = Path(test_project["path"]) / test_project["files"]["large"]

    # Count lines
    line_count = count_lines(large_file_path)

    # Verify line count
    assert line_count == 1000  # Based on the file content we created

```
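
The tests above double as usage documentation for the file-operation tools. As a minimal sketch of driving them directly (the project path and name are placeholders; registration mirrors what the fixture does):

```python
# Hedged sketch: direct use of the file-operation tools outside pytest.
# "/path/to/repo" and "demo" are placeholder values.
from mcp_server_tree_sitter.api import get_project_registry
from mcp_server_tree_sitter.tools.file_operations import get_file_content, list_project_files
from tests.test_helpers import register_project_tool

register_project_tool(path="/path/to/repo", name="demo")
project = get_project_registry().get_project("demo")

# List Python files up to two directories deep, then peek at each one.
for rel_path in list_project_files(project, pattern="**/*.py", max_depth=2):
    print(rel_path)
    print(get_file_content(project, rel_path, max_lines=3))
```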

--------------------------------------------------------------------------------
/src/mcp_server_tree_sitter/tools/search.py:
--------------------------------------------------------------------------------

```python
"""Search tools for tree-sitter code analysis."""

import concurrent.futures
import re
from pathlib import Path
from typing import Any, Dict, List, Optional

from ..exceptions import QueryError, SecurityError
from ..utils.security import validate_file_access


def search_text(
    project: Any,
    pattern: str,
    file_pattern: Optional[str] = None,
    max_results: int = 100,
    case_sensitive: bool = False,
    whole_word: bool = False,
    use_regex: bool = False,
    context_lines: int = 0,
) -> List[Dict[str, Any]]:
    """
    Search for text pattern in project files.

    Args:
        project: Project object
        pattern: Text pattern to search for
        file_pattern: Optional glob pattern to filter files (e.g. "**/*.py")
        max_results: Maximum number of results to return
        case_sensitive: Whether to do case-sensitive matching
        whole_word: Whether to match whole words only
        use_regex: Whether to treat pattern as a regular expression
        context_lines: Number of context lines to include before/after matches

    Returns:
        List of matches with file, line number, and text
    """
    root = project.root_path

    results: List[Dict[str, Any]] = []
    pattern_obj = None

    # Prepare the pattern
    if use_regex:
        try:
            flags = 0 if case_sensitive else re.IGNORECASE
            pattern_obj = re.compile(pattern, flags)
        except re.error as e:
            raise ValueError(f"Invalid regular expression: {e}") from e
    elif whole_word:
        # Escape pattern for use in regex and add word boundary markers
        pattern_escaped = re.escape(pattern)
        flags = 0 if case_sensitive else re.IGNORECASE
        pattern_obj = re.compile(rf"\b{pattern_escaped}\b", flags)
    elif not case_sensitive:
        # For simple case-insensitive search
        pattern = pattern.lower()

    file_pattern = file_pattern or "**/*"

    # Process files in parallel
    def process_file(file_path: Path) -> List[Dict[str, Any]]:
        file_results = []
        try:
            validate_file_access(file_path, root)

            with open(file_path, "r", encoding="utf-8", errors="replace") as f:
                lines = f.readlines()

            for i, line in enumerate(lines, 1):
                match = False

                if pattern_obj:
                    # Using regex pattern
                    match_result = pattern_obj.search(line)
                    match = bool(match_result)
                elif case_sensitive:
                    # Simple case-sensitive search - check both original and stripped versions
                    match = pattern in line or pattern.strip() in line.strip()
                else:
                    # Simple case-insensitive search - check both original and stripped versions
                    line_lower = line.lower()
                    pattern_lower = pattern.lower()
                    match = pattern_lower in line_lower or pattern_lower.strip() in line_lower.strip()

                if match:
                    # Calculate context lines
                    start = max(0, i - 1 - context_lines)
                    end = min(len(lines), i + context_lines)
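                    # (i is 1-based, so the matching line itself is lines[i - 1])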

                    context = []
                    for ctx_i in range(start, end):
                        ctx_line = lines[ctx_i].rstrip("\n")
                        context.append(
                            {
                                "line": ctx_i + 1,
                                "text": ctx_line,
                                "is_match": ctx_i == i - 1,
                            }
                        )

                    file_results.append(
                        {
                            "file": str(file_path.relative_to(root)),
                            "line": i,
                            "text": line.rstrip("\n"),
                            "context": context,
                        }
                    )

                    if len(file_results) >= max_results:
                        break
        except Exception:
            # Skip files that can't be read
            pass

        return file_results

    # Collect files to process
    files_to_process = []
    for path in root.glob(file_pattern):
        if path.is_file():
            files_to_process.append(path)

    # Process files in parallel
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(process_file, f) for f in files_to_process]
        for future in concurrent.futures.as_completed(futures):
            results.extend(future.result())
            if len(results) >= max_results:
                # Cancel any pending futures
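                # Note: Future.cancel() only stops futures that have not yet
                # started; in-flight process_file calls run to completion and
                # any excess results are trimmed by the final slice.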
                for f in futures:
                    f.cancel()
                break

    return results[:max_results]


def query_code(
    project: Any,
    query_string: str,
    language_registry: Any,
    tree_cache: Any,
    file_path: Optional[str] = None,
    language: Optional[str] = None,
    max_results: int = 100,
    include_snippets: bool = True,
) -> List[Dict[str, Any]]:
    """
    Run a tree-sitter query on code files.

    Args:
        project: Project object
        query_string: Tree-sitter query string
        language_registry: Language registry
        tree_cache: Tree cache instance
        file_path: Optional specific file to query
        language: Language to use (required if file_path not provided)
        max_results: Maximum number of results to return
        include_snippets: Whether to include code snippets in results

    Returns:
        List of query matches
    """
    root = project.root_path
    results: List[Dict[str, Any]] = []

    if file_path is not None:
        # Query a specific file
        abs_path = project.get_file_path(file_path)

        try:
            validate_file_access(abs_path, root)
        except SecurityError as e:
            raise SecurityError(f"Access denied: {e}") from e

        # Detect language if not provided
        if not language:
            language = language_registry.language_for_file(file_path)
            if not language:
                raise QueryError(f"Could not detect language for {file_path}")

        try:
            # Check if we have a cached tree
            assert language is not None  # For type checking
            cached = tree_cache.get(abs_path, language)
            if cached:
                tree, source_bytes = cached
            else:
                # Parse file
                with open(abs_path, "rb") as f:
                    source_bytes = f.read()

                parser = language_registry.get_parser(language)
                tree = parser.parse(source_bytes)

                # Cache the tree
                tree_cache.put(abs_path, language, tree, source_bytes)

            # Execute query
            lang = language_registry.get_language(language)
            query = lang.query(query_string)

            captures = query.captures(tree.root_node)

            # Handle different return formats from query.captures()
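            # (py-tree-sitter's captures() return type differs across releases:
            # newer versions return a dict mapping capture name -> nodes, while
            # older versions yield (node, capture_name) pairs; both are handled.)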
            if isinstance(captures, dict):
                # Dictionary format: {capture_name: [node1, node2, ...], ...}
                for capture_name, nodes in captures.items():
                    for node in nodes:
                        # Skip if we've reached max results
                        if max_results is not None and len(results) >= max_results:
                            break

                        try:
                            from ..utils.tree_sitter_helpers import get_node_text

                            text = get_node_text(node, source_bytes, decode=True)
                        except Exception:
                            text = "<binary data>"

                        result = {
                            "file": file_path,
                            "capture": capture_name,
                            "start": {
                                "row": node.start_point[0],
                                "column": node.start_point[1],
                            },
                            "end": {
                                "row": node.end_point[0],
                                "column": node.end_point[1],
                            },
                        }

                        if include_snippets:
                            result["text"] = text

                        results.append(result)
            else:
                # List format: [(node1, capture_name1), (node2, capture_name2), ...]
                for match in captures:
                    # Handle different return types from query.captures()
                    if isinstance(match, tuple) and len(match) == 2:
                        # Direct tuple unpacking
                        node, capture_name = match
                    elif hasattr(match, "node") and hasattr(match, "capture_name"):
                        # Object with node and capture_name attributes
                        node, capture_name = match.node, match.capture_name
                    elif isinstance(match, dict) and "node" in match and "capture" in match:
                        # Dictionary with node and capture keys
                        node, capture_name = match["node"], match["capture"]
                    else:
                        # Skip if format is unknown
                        continue

                    # Skip if we've reached max results
                    if max_results is not None and len(results) >= max_results:
                        break

                    try:
                        from ..utils.tree_sitter_helpers import get_node_text

                        text = get_node_text(node, source_bytes, decode=True)
                    except Exception:
                        text = "<binary data>"

                    result = {
                        "file": file_path,
                        "capture": capture_name,
                        "start": {
                            "row": node.start_point[0],
                            "column": node.start_point[1],
                        },
                        "end": {"row": node.end_point[0], "column": node.end_point[1]},
                    }

                    if include_snippets:
                        result["text"] = text

                    results.append(result)
        except Exception as e:
            raise QueryError(f"Error querying {file_path}: {e}") from e
    else:
        # Query across multiple files
        if not language:
            raise QueryError("Language is required when file_path is not provided")

        # Find all matching files for the language
        extensions = [(ext, lang) for ext, lang in language_registry._language_map.items() if lang == language]

        if not extensions:
            raise QueryError(f"No file extensions found for language {language}")

        # Process files in parallel
        def process_file(rel_path: str) -> List[Dict[str, Any]]:
            try:
                # Use single-file version of query_code
                file_results = query_code(
                    project,
                    query_string,
                    language_registry,
                    tree_cache,
                    rel_path,
                    language,
                    max_results if max_results is None else max_results - len(results),
                    include_snippets,
                )
                return file_results
            except Exception:
                # Skip files that can't be queried
                return []

        # Collect files to process
        files_to_process = []
        for ext, _ in extensions:
            for path in root.glob(f"**/*.{ext}"):
                if path.is_file():
                    files_to_process.append(str(path.relative_to(root)))

        # Process files until we reach max_results
        for file in files_to_process:
            try:
                file_results = process_file(file)
                results.extend(file_results)

                if max_results is not None and len(results) >= max_results:
                    break
            except Exception:
                # Skip files that cause errors
                continue

    return results[:max_results] if max_results is not None else results

```
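
As a rough usage sketch of both entry points (the project name is a placeholder, and the no-op cache below is a stand-in that merely satisfies the `.get`/`.put` interface `query_code` uses, not the server's real parser cache):

```python
# Hedged sketch: drive search_text and query_code directly.
from mcp_server_tree_sitter.api import get_project_registry
from mcp_server_tree_sitter.language.registry import LanguageRegistry
from mcp_server_tree_sitter.tools.search import query_code, search_text


class _NoOpTreeCache:
    """Stand-in satisfying the .get/.put interface query_code expects."""

    def get(self, path, language):
        return None  # always miss, forcing a fresh parse

    def put(self, path, language, tree, source_bytes):
        pass  # discard; a real cache would store the parsed tree


project = get_project_registry().get_project("demo")  # placeholder project name

# Plain-text search: whole-word, case-insensitive, one line of context.
hits = search_text(project, "TODO", file_pattern="**/*.py", whole_word=True, context_lines=1)

# Structural search: every Python function name, capped at 25 results.
matches = query_code(
    project,
    "(function_definition name: (identifier) @func.name)",
    LanguageRegistry(),
    _NoOpTreeCache(),
    language="python",
    max_results=25,
)
```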

--------------------------------------------------------------------------------
/tests/test_debug_flag.py:
--------------------------------------------------------------------------------

```python
"""Tests for debug flag behavior and environment variable processing."""

import io
import logging
import os

import pytest

from mcp_server_tree_sitter.bootstrap import update_log_levels
from mcp_server_tree_sitter.bootstrap.logging_bootstrap import get_log_level_from_env


def test_debug_flag_with_preexisting_env():
    """Test that debug flag works correctly with pre-existing environment variables.

    This test simulates the real-world scenario where the logging is configured
    at import time, but the debug flag is processed later. In this case, the
    debug flag should still trigger a reconfiguration of logging levels.
    """
    # Save original environment and logger state
    original_env = os.environ.get("MCP_TS_LOG_LEVEL")

    # Get the root package logger
    pkg_logger = logging.getLogger("mcp_server_tree_sitter")
    original_level = pkg_logger.level

    # Create a clean test environment
    if "MCP_TS_LOG_LEVEL" in os.environ:
        del os.environ["MCP_TS_LOG_LEVEL"]

    # Set logger level to INFO explicitly
    pkg_logger.setLevel(logging.INFO)

    # Create a test handler to verify levels change
    test_handler = logging.StreamHandler()
    test_handler.setLevel(logging.INFO)
    pkg_logger.addHandler(test_handler)

    try:
        # Simulate the debug flag processing
        # First verify we're starting at INFO level
        assert pkg_logger.level == logging.INFO, "Logger should start at INFO level"
        assert test_handler.level == logging.INFO, "Handler should start at INFO level"

        # Now process the debug flag (this is what happens in main())
        os.environ["MCP_TS_LOG_LEVEL"] = "DEBUG"
        update_log_levels("DEBUG")

        # Verify the change was applied
        assert pkg_logger.level == logging.DEBUG, "Logger level should be changed to DEBUG"
        assert test_handler.level == logging.DEBUG, "Handler level should be changed to DEBUG"

        # Verify that new loggers created after updating will inherit the correct level
        new_logger = logging.getLogger("mcp_server_tree_sitter.test.new_module")
        assert new_logger.getEffectiveLevel() == logging.DEBUG, "New loggers should inherit DEBUG level"

    finally:
        # Cleanup
        pkg_logger.removeHandler(test_handler)

        # Restore original environment
        if original_env is not None:
            os.environ["MCP_TS_LOG_LEVEL"] = original_env
        else:
            if "MCP_TS_LOG_LEVEL" in os.environ:
                del os.environ["MCP_TS_LOG_LEVEL"]

        # Restore logger state
        pkg_logger.setLevel(original_level)


def test_update_log_levels_reconfigures_root_logger():
    """Test that update_log_levels also updates the root logger.

    This tests the enhanced implementation that reconfigures the root
    logger in addition to the package logger, which helps with debug
    flag handling when a module is already imported.
    """
    # Save original logger states
    root_logger = logging.getLogger()
    pkg_logger = logging.getLogger("mcp_server_tree_sitter")
    original_root_level = root_logger.level
    original_pkg_level = pkg_logger.level

    # Create handlers for testing
    root_handler = logging.StreamHandler()
    root_handler.setLevel(logging.INFO)
    root_logger.addHandler(root_handler)

    pkg_handler = logging.StreamHandler()
    pkg_handler.setLevel(logging.INFO)
    pkg_logger.addHandler(pkg_handler)

    try:
        # Set loggers to INFO level
        root_logger.setLevel(logging.INFO)
        pkg_logger.setLevel(logging.INFO)

        # Verify initial levels
        assert root_logger.level == logging.INFO, "Root logger should start at INFO level"
        assert pkg_logger.level == logging.INFO, "Package logger should start at INFO level"
        assert root_handler.level == logging.INFO, "Root handler should start at INFO level"
        assert pkg_handler.level == logging.INFO, "Package handler should start at INFO level"

        # Call update_log_levels with DEBUG
        update_log_levels("DEBUG")

        # Verify all loggers and handlers are updated
        assert root_logger.level == logging.DEBUG, "Root logger should be updated to DEBUG level"
        assert pkg_logger.level == logging.DEBUG, "Package logger should be updated to DEBUG level"
        assert root_handler.level == logging.DEBUG, "Root handler should be updated to DEBUG level"
        assert pkg_handler.level == logging.DEBUG, "Package handler should be updated to DEBUG level"

        # Test with a new child logger
        child_logger = logging.getLogger("mcp_server_tree_sitter.test.child")
        assert child_logger.getEffectiveLevel() == logging.DEBUG, "Child logger should inherit DEBUG level from parent"

    finally:
        # Clean up
        root_logger.removeHandler(root_handler)
        pkg_logger.removeHandler(pkg_handler)

        # Restore original levels
        root_logger.setLevel(original_root_level)
        pkg_logger.setLevel(original_pkg_level)


def test_environment_variable_updates_log_level():
    """Test that setting MCP_TS_LOG_LEVEL changes the logging level correctly."""
    # Save original environment and logger state
    original_env = os.environ.get("MCP_TS_LOG_LEVEL")

    # Get the root package logger
    pkg_logger = logging.getLogger("mcp_server_tree_sitter")
    original_level = pkg_logger.level

    try:
        # First test with DEBUG level
        os.environ["MCP_TS_LOG_LEVEL"] = "DEBUG"

        # Verify the get_log_level_from_env function returns DEBUG
        level = get_log_level_from_env()
        assert level == logging.DEBUG, f"Expected DEBUG level but got {level}"

        # Update log levels and verify the logger is set to DEBUG
        update_log_levels("DEBUG")
        assert pkg_logger.level == logging.DEBUG, f"Logger level should be DEBUG but was {pkg_logger.level}"

        # Check handler levels are synchronized
        for handler in pkg_logger.handlers:
            assert handler.level == logging.DEBUG, f"Handler level should be DEBUG but was {handler.level}"

        # Next test with INFO level
        os.environ["MCP_TS_LOG_LEVEL"] = "INFO"

        # Verify the get_log_level_from_env function returns INFO
        level = get_log_level_from_env()
        assert level == logging.INFO, f"Expected INFO level but got {level}"

        # Update log levels and verify the logger is set to INFO
        update_log_levels("INFO")
        assert pkg_logger.level == logging.INFO, f"Logger level should be INFO but was {pkg_logger.level}"

        # Check handler levels are synchronized
        for handler in pkg_logger.handlers:
            assert handler.level == logging.INFO, f"Handler level should be INFO but was {handler.level}"

    finally:
        # Restore original environment
        if original_env is not None:
            os.environ["MCP_TS_LOG_LEVEL"] = original_env
        else:
            if "MCP_TS_LOG_LEVEL" in os.environ:
                del os.environ["MCP_TS_LOG_LEVEL"]

        # Restore logger state
        pkg_logger.setLevel(original_level)


def test_configure_root_logger_syncs_handlers():
    """Test that configure_root_logger synchronizes handler levels for existing loggers."""
    from mcp_server_tree_sitter.bootstrap.logging_bootstrap import configure_root_logger

    # Save original environment and logger state
    original_env = os.environ.get("MCP_TS_LOG_LEVEL")

    # Create a test logger in the package hierarchy
    test_logger = logging.getLogger("mcp_server_tree_sitter.test.debug_flag")
    original_test_level = test_logger.level

    # Get the root package logger
    pkg_logger = logging.getLogger("mcp_server_tree_sitter")
    original_pkg_level = pkg_logger.level

    # Create handlers with different levels
    debug_handler = logging.StreamHandler()
    debug_handler.setLevel(logging.DEBUG)

    info_handler = logging.StreamHandler()
    info_handler.setLevel(logging.INFO)

    # Add handlers to the test logger
    test_logger.addHandler(debug_handler)
    test_logger.addHandler(info_handler)

    try:
        # Set environment variable to DEBUG
        os.environ["MCP_TS_LOG_LEVEL"] = "DEBUG"

        # Call configure_root_logger
        configure_root_logger()

        # Verify the root package logger is set to DEBUG
        assert pkg_logger.level == logging.DEBUG, (
            f"Root package logger level should be DEBUG but was {pkg_logger.level}"
        )

        # Verify child logger still has its original level (should not be explicitly set)
        assert test_logger.level == original_test_level, (
            "Child logger level should not be changed by configure_root_logger"
        )

        # Verify child logger's effective level is inherited from root package logger
        assert test_logger.getEffectiveLevel() == logging.DEBUG, (
            f"Child logger effective level should be DEBUG but was {test_logger.getEffectiveLevel()}"
        )

        # Verify all handlers of the test logger are synchronized to DEBUG
        for handler in test_logger.handlers:
            assert handler.level == logging.DEBUG, f"Handler level should be DEBUG but was {handler.level}"

    finally:
        # Clean up
        test_logger.removeHandler(debug_handler)
        test_logger.removeHandler(info_handler)

        # Restore original environment
        if original_env is not None:
            os.environ["MCP_TS_LOG_LEVEL"] = original_env
        else:
            if "MCP_TS_LOG_LEVEL" in os.environ:
                del os.environ["MCP_TS_LOG_LEVEL"]

        # Restore logger state
        test_logger.setLevel(original_test_level)
        pkg_logger.setLevel(original_pkg_level)


def test_log_message_levels():
    """Test that log messages about environment variables use the DEBUG level."""
    # Save original environment state
    original_env = {}
    for key in list(os.environ.keys()):
        if key.startswith("MCP_TS_"):
            original_env[key] = os.environ[key]
            del os.environ[key]

    try:
        # Test variable for configuration
        os.environ["MCP_TS_CACHE_MAX_SIZE_MB"] = "256"

        # Create a StringIO to capture log output
        log_output = io.StringIO()

        # Create a handler that writes to our StringIO
        handler = logging.StreamHandler(log_output)
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
        handler.setFormatter(formatter)

        # Add the handler to the root logger
        root_logger = logging.getLogger()
        root_logger.addHandler(handler)

        # Save the original log level
        original_level = root_logger.level

        # Set the log level to DEBUG to capture all messages
        root_logger.setLevel(logging.DEBUG)

        try:
            # Import config to trigger environment variable processing
            from mcp_server_tree_sitter.config import ServerConfig

            # Create a new config instance to trigger environment variable processing
            # Variable is intentionally used to trigger processing
            _ = ServerConfig()

            # Get the output
            log_content = log_output.getvalue()

            # Check for environment variable application messages
            env_messages = [line for line in log_content.splitlines() if "Applied environment variable" in line]

            # Verify that these messages use DEBUG level, not INFO
            for msg in env_messages:
                assert msg.startswith("DEBUG:"), f"Environment variable message should use DEBUG level but found: {msg}"

            # Check if there are any environment variable messages at INFO level
            info_env_messages = [
                line
                for line in log_content.splitlines()
                if "Applied environment variable" in line and line.startswith("INFO:")
            ]

            assert not info_env_messages, (
                f"No environment variable messages should use INFO level, but found: {info_env_messages}"
            )

        finally:
            # Restore original log level
            root_logger.setLevel(original_level)

            # Remove our handler
            root_logger.removeHandler(handler)

    finally:
        # Restore original environment
        for key in list(os.environ.keys()):
            if key.startswith("MCP_TS_"):
                del os.environ[key]

        for key, value in original_env.items():
            os.environ[key] = value


if __name__ == "__main__":
    pytest.main(["-v", __file__])

```
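
The flow these tests exercise boils down to: configure logging once from MCP_TS_LOG_LEVEL, then re-apply levels when a later debug flag arrives. A minimal sketch of that entry-point pattern (illustrative only, not the server's actual main()):

```python
# Illustrative entry-point flow; the real server's startup may differ.
import os

from mcp_server_tree_sitter.bootstrap import update_log_levels
from mcp_server_tree_sitter.bootstrap.logging_bootstrap import configure_root_logger

configure_root_logger()  # applies MCP_TS_LOG_LEVEL at import/startup time


def apply_debug_flag(debug: bool) -> None:
    """Re-apply levels after argument parsing, as the tests above verify."""
    if debug:
        os.environ["MCP_TS_LOG_LEVEL"] = "DEBUG"
        update_log_levels("DEBUG")  # syncs package, root, and handler levels
```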

--------------------------------------------------------------------------------
/tests/test_rust_compatibility.py:
--------------------------------------------------------------------------------

```python
"""Tests for Rust compatibility in the Tree-sitter server."""

import tempfile
import time
from pathlib import Path
from typing import Any, Dict, Generator

import pytest

from tests.test_helpers import (
    get_ast,
    get_dependencies,
    get_symbols,
    register_project_tool,
    run_query,
)


@pytest.fixture
def rust_project(request) -> Generator[Dict[str, Any], None, None]:
    """Create a test project with Rust files."""
    with tempfile.TemporaryDirectory() as temp_dir:
        project_path = Path(temp_dir)

        # Create a simple Rust file
        main_rs = project_path / "main.rs"
        with open(main_rs, "w") as f:
            f.write(
                """
use std::io;
use std::collections::HashMap;

struct Person {
    name: String,
    age: u32,
}

impl Person {
    fn new(name: &str, age: u32) -> Person {
        Person {
            name: String::from(name),
            age,
        }
    }

    fn greet(&self) -> String {
        format!("Hello, my name is {} and I'm {} years old.", self.name, self.age)
    }
}

fn calculate_ages(people: &Vec<Person>) -> HashMap<String, u32> {
    let mut ages = HashMap::new();
    for person in people {
        ages.insert(person.name.clone(), person.age);
    }
    ages
}

fn main() {
    println!("Rust Sample Program");

    let mut people = Vec::new();
    people.push(Person::new("Alice", 30));
    people.push(Person::new("Bob", 25));

    for person in &people {
        println!("{}", person.greet());
    }

    let ages = calculate_ages(&people);
    println!("Ages: {:?}", ages);
}
"""
            )

        # Create a library file
        lib_rs = project_path / "lib.rs"
        with open(lib_rs, "w") as f:
            f.write(
                """
use std::fs;
use std::fs::File;
use std::io::{self, Read, Write};
use std::path::Path;

pub struct FileHandler {
    base_path: String,
}

impl FileHandler {
    pub fn new(base_path: &str) -> FileHandler {
        FileHandler {
            base_path: String::from(base_path),
        }
    }

    pub fn read_file(&self, filename: &str) -> Result<String, io::Error> {
        let path = format!("{}/{}", self.base_path, filename);
        fs::read_to_string(path)
    }

    pub fn write_file(&self, filename: &str, content: &str) -> Result<(), io::Error> {
        let path = format!("{}/{}", self.base_path, filename);
        let mut file = File::create(path)?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }
}

pub fn list_files(dir: &str) -> Result<Vec<String>, io::Error> {
    let mut files = Vec::new();
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();
        if path.is_file() {
            if let Some(filename) = path.file_name() {
                if let Some(name) = filename.to_str() {
                    files.push(String::from(name));
                }
            }
        }
    }
    Ok(files)
}
"""
            )

        # Generate a unique project name based on the test name
        test_name = request.node.name
        unique_id = abs(hash(test_name)) % 10000
        project_name = f"rust_test_project_{unique_id}"

        # Register project with retry mechanism
        try:
            register_project_tool(path=str(project_path), name=project_name)
        except Exception:
            # If registration fails, try with an even more unique name
            project_name = f"rust_test_project_{unique_id}_{int(time.time())}"
            register_project_tool(path=str(project_path), name=project_name)

        yield {
            "name": project_name,
            "path": str(project_path),
            "files": ["main.rs", "lib.rs"],
        }


def test_rust_ast_parsing(rust_project) -> None:
    """Test that Rust code can be parsed into an AST correctly."""
    # Get AST for main.rs
    ast_result = get_ast(
        project=rust_project["name"],
        path="main.rs",
        max_depth=5,
        include_text=True,
    )

    # Verify AST structure
    assert "tree" in ast_result, "AST result should contain a tree"
    assert "language" in ast_result, "AST result should contain language info"
    assert ast_result["language"] == "rust", "Language should be identified as Rust"

    # Check tree has the expected structure
    tree = ast_result["tree"]
    assert tree["type"] == "source_file", "Root node should be a source_file"
    assert "children" in tree, "Tree should have children"

    # Look for key Rust constructs in the AST
    structs_found = []
    functions_found = []
    impl_blocks_found = []

    def find_nodes(node, node_types) -> None:
        if isinstance(node, dict) and "type" in node:
            if node["type"] == "struct_item":
                if "children" in node:
                    for child in node["children"]:
                        if child.get("type") == "type_identifier":
                            structs_found.append(child.get("text", ""))
            elif node["type"] == "function_item":
                if "children" in node:
                    for child in node["children"]:
                        if child.get("type") == "identifier":
                            functions_found.append(child.get("text", ""))
            elif node["type"] == "impl_item":
                impl_blocks_found.append(node)

            if "children" in node:
                for child in node["children"]:
                    find_nodes(child, node_types)

    find_nodes(tree, ["struct_item", "function_item", "impl_item"])

    # Check for Person struct - handle both bytes and strings
    person_found = False
    for name in structs_found:
        if (isinstance(name, bytes) and b"Person" in name) or (isinstance(name, str) and "Person" in name):
            person_found = True
            break
    assert person_found, "Should find Person struct"
    # Check for main and calculate_ages functions - handle both bytes and strings
    main_found = False
    calc_found = False
    for name in functions_found:
        if (isinstance(name, bytes) and b"main" in name) or (isinstance(name, str) and "main" in name):
            main_found = True
        if (isinstance(name, bytes) and b"calculate_ages" in name) or (
            isinstance(name, str) and "calculate_ages" in name
        ):
            calc_found = True

    assert main_found, "Should find main function"
    assert calc_found, "Should find calculate_ages function"
    assert len(impl_blocks_found) > 0, "Should find impl blocks"


def test_rust_symbol_extraction(rust_project) -> None:
    """Test that symbols can be extracted from Rust code."""
    # Get symbols for main.rs
    symbols = get_symbols(project=rust_project["name"], file_path="main.rs")

    # Verify structure of symbols
    assert "structs" in symbols, "Symbols should include structs"
    assert "functions" in symbols, "Symbols should include functions"
    assert "imports" in symbols, "Symbols should include imports"

    # Check for specific symbols we expect
    struct_names = [s.get("name", "") for s in symbols.get("structs", [])]
    function_names = [f.get("name", "") for f in symbols.get("functions", [])]

    # Check for Person struct - handle both bytes and strings
    person_found = False
    for name in struct_names:
        if (isinstance(name, bytes) and b"Person" in name) or (isinstance(name, str) and "Person" in name):
            person_found = True
            break
    assert person_found, "Should find Person struct"
    # Check for main and calculate_ages functions - handle both bytes and strings
    main_found = False
    calc_found = False
    for name in function_names:
        if (isinstance(name, bytes) and b"main" in name) or (isinstance(name, str) and "main" in name):
            main_found = True
        if (isinstance(name, bytes) and b"calculate_ages" in name) or (
            isinstance(name, str) and "calculate_ages" in name
        ):
            calc_found = True

    assert main_found, "Should find main function"
    assert calc_found, "Should find calculate_ages function"


def test_rust_dependency_analysis(rust_project) -> None:
    """Test that dependencies can be identified in Rust code."""
    # Get dependencies for main.rs
    dependencies = get_dependencies(project=rust_project["name"], file_path="main.rs")

    # Verify dependencies structure
    assert isinstance(dependencies, dict), "Dependencies should be a dictionary"

    # Check for standard library dependencies
    all_deps = str(dependencies)  # Convert to string for easy checking
    assert "std::io" in all_deps, "Should find std::io dependency"
    assert "std::collections::HashMap" in all_deps, "Should find HashMap dependency"


def test_rust_specific_queries(rust_project) -> None:
    """Test that Rust-specific queries can be executed on the AST."""
    # Define a query to find struct definitions
    struct_query = """
    (struct_item
      name: (type_identifier) @struct.name
      body: (field_declaration_list) @struct.body
    ) @struct.def
    """

    # Run the query
    struct_results = run_query(
        project=rust_project["name"],
        query=struct_query,
        file_path="main.rs",
        language="rust",
    )

    # Verify results
    assert isinstance(struct_results, list), "Query results should be a list"
    assert len(struct_results) > 0, "Should find at least one struct"

    # Check for Person struct
    person_found = False
    for result in struct_results:
        if result.get("capture") == "struct.name" and result.get("text") == "Person":
            person_found = True
            break

    assert person_found, "Should find Person struct in query results"

    # Define a query to find impl blocks
    impl_query = """
    (impl_item
      trait: (type_identifier)? @impl.trait
      type: (type_identifier) @impl.type
      body: (declaration_list) @impl.body
    ) @impl.def
    """

    # Run the query
    impl_results = run_query(
        project=rust_project["name"],
        query=impl_query,
        file_path="main.rs",
        language="rust",
    )

    # Verify results
    assert isinstance(impl_results, list), "Query results should be a list"
    assert len(impl_results) > 0, "Should find at least one impl block"

    # Check for Person impl
    person_impl_found = False
    for result in impl_results:
        if result.get("capture") == "impl.type" and result.get("text") == "Person":
            person_impl_found = True
            break

    assert person_impl_found, "Should find Person impl in query results"


def test_rust_trait_and_macro_handling(rust_project) -> None:
    """Test handling of Rust-specific constructs like traits and macros."""
    # Create a file with traits and macros
    trait_file = Path(rust_project["path"]) / "traits.rs"
    with open(trait_file, "w") as f:
        f.write(
            """
pub trait Display {
    fn display(&self) -> String;
}

pub trait Calculate {
    fn calculate(&self) -> f64;
}

// Implement both traits for a struct
pub struct Value {
    pub x: f64,
    pub y: f64,
}

impl Display for Value {
    fn display(&self) -> String {
        format!("Value({}, {})", self.x, self.y)
    }
}

impl Calculate for Value {
    fn calculate(&self) -> f64 {
        self.x * self.y
    }
}

// A macro
macro_rules! create_value {
    ($x:expr, $y:expr) => {
        Value { x: $x, y: $y }
    };
}

fn main() {
    let v = create_value!(2.5, 3.0);
    println!("{}: {}", v.display(), v.calculate());
}
"""
        )

    # Get AST for this file
    ast_result = get_ast(
        project=rust_project["name"],
        path="traits.rs",
        max_depth=5,
        include_text=True,
    )

    # Look for trait definitions and macro rules
    traits_found = []
    macros_found = []

    def find_specific_nodes(node) -> None:
        if isinstance(node, dict) and "type" in node:
            if node["type"] == "trait_item":
                if "children" in node:
                    for child in node["children"]:
                        if child.get("type") == "type_identifier":
                            traits_found.append(child.get("text", ""))
            elif node["type"] == "macro_definition":
                if "children" in node:
                    for child in node["children"]:
                        if child.get("type") == "identifier":
                            macros_found.append(child.get("text", ""))

            if "children" in node:
                for child in node["children"]:
                    find_specific_nodes(child)

    find_specific_nodes(ast_result["tree"])

    # Check for Display and Calculate traits, and create_value macro - handle both bytes and strings
    display_found = False
    calculate_found = False
    macro_found = False

    for name in traits_found:
        if (isinstance(name, bytes) and b"Display" in name) or (isinstance(name, str) and "Display" in name):
            display_found = True
        if (isinstance(name, bytes) and b"Calculate" in name) or (isinstance(name, str) and "Calculate" in name):
            calculate_found = True

    for name in macros_found:
        if (isinstance(name, bytes) and b"create_value" in name) or (isinstance(name, str) and "create_value" in name):
            macro_found = True

    assert display_found, "Should find Display trait"
    assert calculate_found, "Should find Calculate trait"
    assert macro_found, "Should find create_value macro"

```
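
The dependency assertions above can be reproduced with a standalone query. A short sketch (the project name is a placeholder for whatever the fixture generated; node and field names follow the tree-sitter-rust grammar):

```python
# Hedged sketch: list the `use` imports that test_rust_dependency_analysis checks.
from tests.test_helpers import run_query

imports = run_query(
    project="rust_test_project_1234",  # placeholder for the fixture-generated name
    query="(use_declaration argument: (_) @import.path)",
    file_path="main.rs",
    language="rust",
)
for result in imports:
    if result.get("capture") == "import.path":
        print(result.get("text"))  # e.g. std::io, std::collections::HashMap
```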

--------------------------------------------------------------------------------
/tests/test_tree_sitter_helpers.py:
--------------------------------------------------------------------------------

```python
"""Tests for tree_sitter_helpers.py module."""

import tempfile
from pathlib import Path
from typing import Any, Dict

import pytest

from mcp_server_tree_sitter.utils.tree_sitter_helpers import (
    create_edit,
    edit_tree,
    find_all_descendants,
    get_changed_ranges,
    get_node_text,
    get_node_with_text,
    is_node_inside,
    parse_file_incremental,
    parse_file_with_detection,
    parse_source,
    parse_source_incremental,
    walk_tree,
)


# Fixtures
@pytest.fixture
def test_files(tmp_path) -> Dict[str, Path]:
    """Create temporary test files for different languages."""
    python_file = tmp_path / "test.py"
    js_file = tmp_path / "test.js"

    # Write Python test file
    with open(python_file, "w") as f:
        f.write(
            """def hello(name):
    print(f"Hello, {name}!")

class Person:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def greet(self):
        return f"Hi, I'm {self.name} and I'm {self.age} years old."

if __name__ == "__main__":
    person = Person("Alice", 30)
    print(person.greet())
"""
        )

    # Write JavaScript test file
    with open(js_file, "w") as f:
        f.write(
            """
function hello(name) {
    return `Hello, ${name}!`;
}

class Person {
    constructor(name, age) {
        this.name = name;
        this.age = age;
    }

    greet() {
        return `Hi, I'm ${this.name} and I'm ${this.age} years old.`;
    }
}

const person = new Person("Alice", 30);
console.log(person.greet());
"""
        )

    return {"python": python_file, "javascript": js_file}


@pytest.fixture
def parsed_files(test_files) -> Dict[str, Dict[str, Any]]:
    """Create parsed source trees for different languages."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()
    result = {}

    # Parse Python file
    py_parser = registry.get_parser("python")
    with open(test_files["python"], "rb") as f:
        py_source = f.read()
    py_tree = py_parser.parse(py_source)
    result["python"] = {
        "tree": py_tree,
        "source": py_source,
        "language": "python",
        "parser": py_parser,
    }

    # Parse JavaScript file
    js_parser = registry.get_parser("javascript")
    with open(test_files["javascript"], "rb") as f:
        js_source = f.read()
    js_tree = js_parser.parse(js_source)
    result["javascript"] = {
        "tree": js_tree,
        "source": js_source,
        "language": "javascript",
        "parser": js_parser,
    }

    return result


# Tests for file parsing functions
def test_parse_file_with_detection(test_files, tmp_path):
    """Test parsing a file."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()

    # Parse Python file
    tree, source = parse_file_with_detection(test_files["python"], "python", registry)
    assert tree is not None
    assert source is not None
    assert isinstance(source, bytes)
    assert len(source) > 0
    assert source.startswith(b"def hello")

    # Parse JavaScript file
    tree, source = parse_file_with_detection(test_files["javascript"], "javascript", registry)
    assert tree is not None
    assert source is not None
    assert isinstance(source, bytes)
    assert len(source) > 0
    assert b"function hello" in source


def test_parse_file_with_unknown_language(tmp_path):
    """Test handling of unknown language when parsing a file."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()

    # Create a file with unknown extension
    unknown_file = tmp_path / "test.unknown"
    with open(unknown_file, "w") as f:
        f.write("This is a test file with unknown language")

    # Try to parse with auto-detection (should fail gracefully)
    with pytest.raises(ValueError):
        parse_file_with_detection(unknown_file, None, registry)

    # Try to parse with explicit unknown language (should also fail)
    with pytest.raises(ValueError):
        parse_file_with_detection(unknown_file, "nonexistent_language", registry)


def test_parse_source(parsed_files):
    """Test parsing source code."""
    # Get Python parser and source
    py_parser = parsed_files["python"]["parser"]
    py_source = parsed_files["python"]["source"]

    # Parse source
    tree = parse_source(py_source, py_parser)
    assert tree is not None
    assert tree.root_node is not None
    assert tree.root_node.type == "module"

    # Get JavaScript parser and source
    js_parser = parsed_files["javascript"]["parser"]
    js_source = parsed_files["javascript"]["source"]

    # Parse source
    tree = parse_source(js_source, js_parser)
    assert tree is not None
    assert tree.root_node is not None
    assert tree.root_node.type == "program"


def test_parse_source_incremental(parsed_files):
    """Test incremental parsing of source code."""
    # Get Python parser, tree, and source
    py_parser = parsed_files["python"]["parser"]
    # Only source is needed for this test (tree is unused)
    py_source = parsed_files["python"]["source"]

    # Modify the source
    modified_source = py_source.replace(b"Hello", b"Greetings")

    # Parse with original tree
    original_tree = py_parser.parse(py_source)
    incremental_tree = parse_source_incremental(modified_source, original_tree, py_parser)

    # Verify the new tree reflects the changes
    assert incremental_tree is not None
    assert incremental_tree.root_node is not None
    node_text = get_node_text(incremental_tree.root_node, modified_source, decode=False)
    assert b"Greetings" in node_text


def test_edit_tree(parsed_files):
    """Test editing a syntax tree."""
    # Get Python tree and source
    py_tree = parsed_files["python"]["tree"]
    py_source = parsed_files["python"]["source"]

    # Find the position of "Hello" in the source
    hello_pos = py_source.find(b"Hello")
    assert hello_pos > 0

    # Create an edit to replace "Hello" with "Greetings". Edit points are
    # (row, column) pairs, so compute them from the source rather than
    # reusing raw byte offsets: "Hello" is not on the first line of the file.
    start_byte = hello_pos
    old_end_byte = hello_pos + len("Hello")
    new_end_byte = hello_pos + len("Greetings")
    row = py_source.count(b"\n", 0, hello_pos)
    col = hello_pos - (py_source.rfind(b"\n", 0, hello_pos) + 1)
    edit = create_edit(
        start_byte,
        old_end_byte,
        new_end_byte,
        (row, col),
        (row, col + len("Hello")),
        (row, col + len("Greetings")),
    )

    # Apply the edit
    py_tree = edit_tree(py_tree, edit)

    # Modify the source to match the edit
    modified_source = py_source.replace(b"Hello", b"Greetings")

    # Verify the edited tree works with the modified source
    root_text = get_node_text(py_tree.root_node, modified_source, decode=False)
    assert b"Greetings" in root_text


def test_get_changed_ranges(parsed_files):
    """Test getting changed ranges between trees."""
    # Get Python parser, tree, and source
    py_parser = parsed_files["python"]["parser"]
    py_tree = parsed_files["python"]["tree"]
    py_source = parsed_files["python"]["source"]

    # Modify the source
    modified_source = py_source.replace(b"Hello", b"Greetings")

    # Parse the modified source
    modified_tree = py_parser.parse(modified_source)

    # Get the changed ranges
    ranges = get_changed_ranges(py_tree, modified_tree)

    # Verify we have changed ranges
    assert len(ranges) > 0
    assert isinstance(ranges[0], tuple)
    assert len(ranges[0]) == 2  # (start_byte, end_byte)


def test_get_node_text(parsed_files):
    """Test extracting text from a node."""
    # Get Python tree and source
    py_tree = parsed_files["python"]["tree"]
    py_source = parsed_files["python"]["source"]

    # Get text from root node
    root_text = get_node_text(py_tree.root_node, py_source, decode=False)
    assert isinstance(root_text, bytes)
    assert root_text == py_source

    # Get text from a specific node (e.g., first function definition)
    function_node = None
    cursor = walk_tree(py_tree.root_node)
    while cursor.goto_first_child():
        if cursor.node.type == "function_definition":
            function_node = cursor.node
            break

    assert function_node is not None
    function_text = get_node_text(function_node, py_source, decode=False)
    assert isinstance(function_text, bytes)
    assert b"def hello" in function_text


def test_get_node_with_text(parsed_files):
    """Test finding a node with specific text."""
    # Get Python tree and source
    py_tree = parsed_files["python"]["tree"]
    py_source = parsed_files["python"]["source"]

    # Find node containing "Hello"
    hello_node = get_node_with_text(py_tree.root_node, py_source, b"Hello")
    assert hello_node is not None
    node_text = get_node_text(hello_node, py_source, decode=False)
    assert b"Hello" in node_text


def test_walk_tree(parsed_files):
    """Test walking a tree with cursor."""
    # Get Python tree
    py_tree = parsed_files["python"]["tree"]

    # Walk the tree and collect node types
    node_types = []
    cursor = walk_tree(py_tree.root_node)
    node_types.append(cursor.node.type)

    # Go to first child (should be function_definition)
    assert cursor.goto_first_child()
    node_types.append(cursor.node.type)

    # Go to next sibling
    while cursor.goto_next_sibling():
        node_types.append(cursor.node.type)

    # Go back to parent
    assert cursor.goto_parent()
    assert cursor.node.type == "module"

    # Verify we found some nodes
    assert len(node_types) > 0
    assert "module" in node_types
    assert "function_definition" in node_types or "def" in node_types


def test_is_node_inside(parsed_files):
    """Test checking if a node is inside another."""
    # Get Python tree
    py_tree = parsed_files["python"]["tree"]

    # Get root node and first child
    root_node = py_tree.root_node
    assert root_node.child_count > 0
    child_node = root_node.children[0]

    # Verify child is inside root
    assert is_node_inside(child_node, root_node)
    assert not is_node_inside(root_node, child_node)
    assert is_node_inside(child_node, child_node)  # Node is inside itself

    # Test with specific positions
    # Root node contains all positions in the file
    assert is_node_inside((0, 0), root_node)
    # First line should be within first child
    assert is_node_inside((0, 5), child_node)
    # Invalid position outside file
    assert not is_node_inside((999, 0), root_node)


def test_find_all_descendants(parsed_files):
    """Test finding all descendants of a node."""
    # Get Python tree
    py_tree = parsed_files["python"]["tree"]

    # Get all descendants
    all_descendants = find_all_descendants(py_tree.root_node)
    assert len(all_descendants) > 0

    # Get descendants with depth limit
    limited_descendants = find_all_descendants(py_tree.root_node, max_depth=2)

    # Verify depth limiting works (there should be fewer descendants)
    assert len(limited_descendants) <= len(all_descendants)


# Test edge cases and error handling
def test_get_node_text_with_invalid_byte_range(parsed_files):
    """Test get_node_text with invalid byte range."""
    # Only source is needed for this test
    py_source = parsed_files["python"]["source"]

    # Create a node with an invalid byte range by modifying properties
    # This is a bit of a hack, but it's effective for testing error handling
    class MockNode:
        def __init__(self):
            self.start_byte = len(py_source) + 100  # Beyond source length
            self.end_byte = len(py_source) + 200
            self.type = "invalid"
            self.start_point = (999, 0)
            self.end_point = (999, 10)
            self.is_named = True

    # Create mock node and try to get text
    mock_node = MockNode()
    result = get_node_text(mock_node, py_source, decode=False)

    # Should return empty bytes for invalid range
    assert result == b""


def test_parse_file_incremental(test_files, tmp_path):
    """Test incremental parsing of a file."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()

    # Initial parse
    tree1, source1 = parse_file_with_detection(test_files["python"], "python", registry)

    # Create a modified version of the file
    modified_file = tmp_path / "modified.py"
    with open(test_files["python"], "rb") as f:
        content = f.read()
    modified_content = content.replace(b"Hello", b"Greetings")
    with open(modified_file, "wb") as f:
        f.write(modified_content)

    # Parse incrementally
    tree2, source2 = parse_file_incremental(modified_file, tree1, "python", registry)

    # Verify the new tree reflects the changes
    assert tree2 is not None
    assert source2 is not None
    assert b"Greetings" in source2
    assert b"Greetings" in get_node_text(tree2.root_node, source2, decode=False)


def test_parse_file_nonexistent():
    """Test handling of nonexistent file."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()

    # Try to parse a nonexistent file
    with pytest.raises(FileNotFoundError):
        parse_file_with_detection(Path("/nonexistent/file.py"), "python", registry)


def test_parse_file_without_language(test_files):
    """Test parsing a file without specifying language."""
    from mcp_server_tree_sitter.language.registry import LanguageRegistry

    registry = LanguageRegistry()

    # Parse Python file by auto-detecting language from extension
    tree, source = parse_file_with_detection(test_files["python"], None, registry)
    assert tree is not None
    assert source is not None
    assert isinstance(source, bytes)
    assert len(source) > 0
    assert tree.root_node.type == "module"  # Python tree

```

--------------------------------------------------------------------------------
/src/mcp_server_tree_sitter/cache/parser_cache.py:
--------------------------------------------------------------------------------

```python
"""Caching system for tree-sitter parse trees."""

import logging
import threading
import time
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Optional, Tuple

# get_container is imported from ..di at runtime (inside methods) to avoid circular imports
from ..utils.tree_sitter_types import (
    Parser,
    Tree,
    ensure_language,
    ensure_parser,
    ensure_tree,
)

logger = logging.getLogger(__name__)


class TreeCache:
    """Cache for parsed syntax trees."""

    def __init__(self, max_size_mb: Optional[int] = None, ttl_seconds: Optional[int] = None):
        """Initialize the tree cache with explicit size and TTL settings."""
        self.cache: Dict[str, Tuple[Any, bytes, float]] = {}  # (tree, source, timestamp)
        self.lock = threading.RLock()
        self.current_size_bytes = 0
        self.modified_trees: Dict[str, bool] = {}
        self.max_size_mb = max_size_mb or 100
        self.ttl_seconds = ttl_seconds or 300
        self.enabled = True

    def _get_cache_key(self, file_path: Path, language: str) -> str:
        """Generate cache key from file path and language."""
        return f"{language}:{str(file_path)}:{file_path.stat().st_mtime}"

    def set_enabled(self, enabled: bool) -> None:
        """Set whether caching is enabled."""
        self.enabled = enabled

    def set_max_size_mb(self, max_size_mb: int) -> None:
        """Set maximum cache size in MB."""
        self.max_size_mb = max_size_mb

    def set_ttl_seconds(self, ttl_seconds: int) -> None:
        """Set TTL for cache entries in seconds."""
        self.ttl_seconds = ttl_seconds

    def _get_max_size_mb(self) -> float:
        """Get current max size setting."""
        # Always get the latest from container config
        try:
            from ..di import get_container

            config = get_container().get_config()
            return config.cache.max_size_mb if self.enabled else 0  # Return 0 if disabled
        except (ImportError, AttributeError):
            # Fallback to instance value if container unavailable
            return self.max_size_mb

    def _get_ttl_seconds(self) -> int:
        """Get current TTL setting."""
        # Always get the latest from container config
        try:
            from ..di import get_container

            config = get_container().get_config()
            return config.cache.ttl_seconds
        except (ImportError, AttributeError):
            # Fallback to instance value if container unavailable
            return self.ttl_seconds

    def _is_cache_enabled(self) -> bool:
        """Check if caching is enabled."""
        # Honor both local setting and container config
        try:
            from ..di import get_container

            config = get_container().get_config()
            is_enabled = self.enabled and config.cache.enabled
            # For very small caches, log the state
            if not is_enabled:
                logger.debug(
                    f"Cache disabled: self.enabled={self.enabled}, config.cache.enabled={config.cache.enabled}"
                )
            return is_enabled
        except (ImportError, AttributeError):
            # Fallback to instance value if container unavailable
            return self.enabled

    def get(self, file_path: Path, language: str) -> Optional[Tuple[Tree, bytes]]:
        """
        Get cached tree if available and not expired.

        Args:
            file_path: Path to the source file
            language: Language identifier

        Returns:
            Tuple of (tree, source_bytes) if cached, None otherwise
        """
        # Check if caching is enabled
        if not self._is_cache_enabled():
            return None

        try:
            cache_key = self._get_cache_key(file_path, language)
        except (FileNotFoundError, OSError):
            return None

        with self.lock:
            if cache_key in self.cache:
                tree, source, timestamp = self.cache[cache_key]

                # Check if cache entry has expired (using current config TTL)
                ttl_seconds = self._get_ttl_seconds()
                current_time = time.time()
                entry_age = current_time - timestamp
                if entry_age > ttl_seconds:
                    logger.debug(f"Cache entry expired: age={entry_age:.2f}s, ttl={ttl_seconds}s")
                    del self.cache[cache_key]
                    # Approximate size reduction
                    self.current_size_bytes -= len(source)
                    if cache_key in self.modified_trees:
                        del self.modified_trees[cache_key]
                    return None

                # Cast to the correct type for type checking
                safe_tree = ensure_tree(tree)
                return safe_tree, source

        return None

    def put(self, file_path: Path, language: str, tree: Tree, source: bytes) -> None:
        """
        Cache a parsed tree.

        Args:
            file_path: Path to the source file
            language: Language identifier
            tree: Parsed tree
            source: Source bytes
        """
        # Check if caching is enabled
        is_enabled = self._is_cache_enabled()
        if not is_enabled:
            logger.debug(f"Skipping cache for {file_path}: caching is disabled")
            return

        try:
            cache_key = self._get_cache_key(file_path, language)
        except (FileNotFoundError, OSError):
            return

        source_size = len(source)

        # Check if adding this entry would exceed cache size limit (using current max size)
        max_size_mb = self._get_max_size_mb()
        max_size_bytes = max_size_mb * 1024 * 1024

        # If max_size is 0 or very small, disable caching
        if max_size_bytes <= 1024:  # If less than 1KB, don't cache
            logger.debug(f"Cache size too small: {max_size_mb}MB, skipping cache")
            return

        if source_size > max_size_bytes:
            logger.warning(f"File too large to cache: {file_path} ({source_size / (1024 * 1024):.2f}MB)")
            return

        with self.lock:
            # If entry already exists, subtract its size
            if cache_key in self.cache:
                _, old_source, _ = self.cache[cache_key]
                self.current_size_bytes -= len(old_source)
            else:
                # If we need to make room for a new entry, remove oldest entries
                if self.current_size_bytes + source_size > max_size_bytes:
                    self._evict_entries(source_size)

            # Store the new entry
            self.cache[cache_key] = (tree, source, time.time())
            self.current_size_bytes += source_size
            logger.debug(
                f"Added entry to cache: {file_path}, size: {source_size / 1024:.1f}KB, "
                f"total cache: {self.current_size_bytes / (1024 * 1024):.2f}MB"
            )

            # Mark as not modified (fresh parse)
            self.modified_trees[cache_key] = False

    def mark_modified(self, file_path: Path, language: str) -> None:
        """
        Mark a tree as modified for tracking changes.

        Args:
            file_path: Path to the source file
            language: Language identifier
        """
        try:
            cache_key = self._get_cache_key(file_path, language)
            with self.lock:
                if cache_key in self.cache:
                    self.modified_trees[cache_key] = True
        except (FileNotFoundError, OSError):
            pass

    def is_modified(self, file_path: Path, language: str) -> bool:
        """
        Check if a tree has been modified since last parse.

        Args:
            file_path: Path to the source file
            language: Language identifier

        Returns:
            True if the tree has been modified, False otherwise
        """
        try:
            cache_key = self._get_cache_key(file_path, language)
            with self.lock:
                return self.modified_trees.get(cache_key, False)
        except (FileNotFoundError, OSError):
            return False

    def update_tree(self, file_path: Path, language: str, tree: Tree, source: bytes) -> None:
        """
        Update a cached tree after modification.

        Args:
            file_path: Path to the source file
            language: Language identifier
            tree: Updated parsed tree
            source: Updated source bytes
        """
        try:
            cache_key = self._get_cache_key(file_path, language)
        except (FileNotFoundError, OSError):
            return

        with self.lock:
            if cache_key in self.cache:
                _, old_source, _ = self.cache[cache_key]
                # Update size tracking
                self.current_size_bytes -= len(old_source)
                self.current_size_bytes += len(source)
                # Update cache entry
                self.cache[cache_key] = (tree, source, time.time())
                # Reset modified flag
                self.modified_trees[cache_key] = False
            else:
                # If not already in cache, just add it
                self.put(file_path, language, tree, source)

    def _evict_entries(self, required_bytes: int) -> None:
        """
        Evict entries to make room for new data.

        Args:
            required_bytes: Number of bytes to make room for
        """
        # Get current max size from config
        max_size_mb = self._get_max_size_mb()
        max_size_bytes = max_size_mb * 1024 * 1024

        # Check if we actually need to evict anything
        if self.current_size_bytes + required_bytes <= max_size_bytes:
            return

        # If cache is empty (happens in tests sometimes), nothing to evict
        if not self.cache:
            return

        # Sort by timestamp (oldest first)
        sorted_entries = sorted(self.cache.items(), key=lambda item: item[1][2])

        bytes_freed = 0
        entries_removed = 0

        # Force removal of at least one entry in tests with very small caches (< 0.1MB)
        force_removal = max_size_mb < 0.1
        target_to_free = required_bytes

        # If cache is small, make sure we remove at least one item
        min_entries_to_remove = 1

        # If cache is very small, removing any entry should be enough
        if force_removal or max_size_bytes < 10 * 1024:  # Less than 10KB
            # For tests with very small caches, we need to be more aggressive
            target_to_free = self.current_size_bytes // 2  # Remove half the cache
            min_entries_to_remove = max(1, len(self.cache) // 2)
            logger.debug(f"Small cache detected ({max_size_mb}MB), removing {min_entries_to_remove} entries")

        # If cache is already too full, free more space to prevent continuous evictions
        elif self.current_size_bytes > max_size_bytes * 0.9:
            target_to_free += int(max_size_bytes * 0.2)  # Free extra 20%
            min_entries_to_remove = max(1, len(self.cache) // 4)

        for key, (_, source, _) in sorted_entries:
            # Remove entry
            del self.cache[key]
            if key in self.modified_trees:
                del self.modified_trees[key]

            entry_size = len(source)
            bytes_freed += entry_size
            self.current_size_bytes -= entry_size
            entries_removed += 1

            # Stop once we've freed enough space AND removed minimum entries
            if bytes_freed >= target_to_free and entries_removed >= min_entries_to_remove:
                break

        # Log the eviction with appropriate level
        log_msg = (
            f"Evicted {entries_removed} cache entries, freed {bytes_freed / 1024:.1f}KB, "
            f"current size: {self.current_size_bytes / (1024 * 1024):.2f}MB"
        )
        if force_removal:
            logger.debug(log_msg)
        else:
            logger.info(log_msg)

    def invalidate(self, file_path: Optional[Path] = None) -> None:
        """
        Invalidate cache entries.

        Args:
            file_path: If provided, invalidate only entries for this file.
                      If None, invalidate the entire cache.
        """
        with self.lock:
            if file_path is None:
                # Clear entire cache
                self.cache.clear()
                self.modified_trees.clear()
                self.current_size_bytes = 0
            else:
                # Clear only entries for this file
                keys_to_remove = [key for key in self.cache if str(file_path) in key]
                for key in keys_to_remove:
                    _, source, _ = self.cache[key]
                    self.current_size_bytes -= len(source)
                    del self.cache[key]
                    if key in self.modified_trees:
                        del self.modified_trees[key]


# The TreeCache is now initialized and managed by the DependencyContainer in di.py
# No global instance is needed here anymore.


# The following function is maintained for backward compatibility
def get_tree_cache() -> TreeCache:
    """Get the tree cache from the dependency container."""
    from ..di import get_container

    tree_cache = get_container().tree_cache
    return tree_cache


@lru_cache(maxsize=32)
def get_cached_parser(language: Any) -> Parser:
    """Get a cached parser for a language."""
    parser = Parser()
    safe_language = ensure_language(language)

    # Support both py-tree-sitter APIs: set_language() on older releases,
    # the language property on newer ones
    try:
        parser.set_language(safe_language)  # type: ignore
    except AttributeError:
        parser.language = safe_language  # type: ignore

    return ensure_parser(parser)

```

--------------------------------------------------------------------------------
/FEATURES.md:
--------------------------------------------------------------------------------

```markdown
# MCP Tree-sitter Server: Feature Matrix

This document provides a comprehensive overview of all MCP Tree-sitter server commands, their status, dependencies, and common usage patterns. It serves as both a reference guide and a test matrix for ongoing development.

## Table of Contents
- [Supported Languages](#supported-languages)
- [Command Status Legend](#command-status-legend)
- [Command Reference](#command-reference)
  - [Project Management Commands](#project-management-commands)
  - [Language Tools Commands](#language-tools-commands)
  - [File Operations Commands](#file-operations-commands)
  - [AST Analysis Commands](#ast-analysis-commands)
  - [Search and Query Commands](#search-and-query-commands)
  - [Code Analysis Commands](#code-analysis-commands)
  - [Configuration Management Commands](#configuration-management-commands)
- [Implementation Status](#implementation-status)
  - [Language Pack Integration](#language-pack-integration)
  - [Implementation Gaps](#implementation-gaps)
  - [MCP SDK Implementation](#mcp-sdk-implementation)
- [Implementation Notes](#implementation-notes)
- [Testing Guidelines](#testing-guidelines)
- [Implementation Progress](#implementation-progress)

---

## Supported Languages

The following programming languages are fully supported with symbol extraction, AST analysis, and query capabilities:

| Language | Symbol Extraction | AST Analysis | Query Support |
|----------|-------------------|--------------|--------------|  
| Python | ✅ | ✅ | ✅ |
| JavaScript | ✅ | ✅ | ✅ |
| TypeScript | ✅ | ✅ | ✅ |
| Go | ✅ | ✅ | ✅ |
| Rust | ✅ | ✅ | ✅ |
| C | ✅ | ✅ | ✅ |
| C++ | ✅ | ✅ | ✅ |
| Swift | ✅ | ✅ | ✅ |
| Java | ✅ | ✅ | ✅ |
| Kotlin | ✅ | ✅ | ✅ |
| Julia | ✅ | ✅ | ✅ |
| APL | ✅ | ✅ | ✅ |

Additional languages are available via tree-sitter-language-pack, including Bash, C#, Clojure, Elixir, Elm, Haskell, Lua, Objective-C, OCaml, PHP, Protobuf, Ruby, Scala, SCSS, SQL, and XML.

---

## Command Status Legend

| Status | Meaning |
|--------|---------|
| ✅ | Working - Feature is fully operational |
| ⚠️ | Partially Working - Feature works with limitations or in specific conditions |
| ❌ | Not Working - Feature fails or is unavailable |
| 🔄 | Requires Dependency - Needs external components (e.g., language parsers) |

---

## Command Reference

### Project Management Commands

These commands handle project registration and management.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `register_project_tool` | ✅ | None | Successfully registers projects with path, name, and description |
| `list_projects_tool` | ✅ | None | Successfully lists all registered projects |
| `remove_project_tool` | ✅ | None | Successfully removes registered projects |

**Example Usage:**
```python
# Register a project
register_project_tool(path="/path/to/project", name="my-project", description="My awesome project")

# List all projects
list_projects_tool()

# Remove a project
remove_project_tool(name="my-project")
```

### Language Tools Commands

These commands manage tree-sitter language parsers.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `list_languages` | ✅ | None | Lists all available languages from tree-sitter-language-pack |
| `check_language_available` | ✅ | None | Checks if a specific language is available via tree-sitter-language-pack |

**Example Usage:**
```python
# List all available languages
list_languages()

# Check if a specific language is available
check_language_available(language="python")
```

### File Operations Commands

These commands access and manipulate project files.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `list_files` | ✅ | Project registration | Successfully lists files with optional filtering |
| `get_file` | ✅ | Project registration | Successfully retrieves file content |
| `get_file_metadata` | ✅ | Project registration | Returns file information including size, modification time, etc. |

**Example Usage:**
```python
# List Python files
list_files(project="my-project", pattern="**/*.py")

# Get file content
get_file(project="my-project", path="src/main.py")

# Get file metadata
get_file_metadata(project="my-project", path="src/main.py")
```

### AST Analysis Commands

These commands perform abstract syntax tree (AST) operations.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `get_ast` | ✅ | Project registration | Returns AST using efficient cursor-based traversal with proper node IDs |
| `get_node_at_position` | ✅ | Project registration | Successfully retrieves nodes at a specific position in a file |

**Example Usage:**
```python
# Get AST for a file
get_ast(project="my-project", path="src/main.py", max_depth=5, include_text=True)

# Find node at position
get_node_at_position(project="my-project", path="src/main.py", row=10, column=5)
```

### Search and Query Commands

These commands search code and execute tree-sitter queries.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `find_text` | ✅ | Project registration | Text search works correctly with pattern matching |
| `run_query` | ✅ | Project registration, Language | Successfully executes tree-sitter queries and returns results |
| `get_query_template_tool` | ✅ | None | Successfully returns templates when available |
| `list_query_templates_tool` | ✅ | None | Successfully lists available templates |
| `build_query` | ✅ | None | Successfully builds and combines query templates |
| `adapt_query` | ✅ | None | Successfully adapts queries between different languages |
| `get_node_types` | ✅ | None | Successfully returns descriptions of node types for a language |

**Example Usage:**
```python
# Find text in project files
find_text(project="my-project", pattern="TODO", file_pattern="**/*.py")

# Run a tree-sitter query
run_query(
    project="my-project",
    query="(function_definition name: (identifier) @function.name) @function.def",
    file_path="src/main.py",
    language="python"
)

# List query templates for a language
list_query_templates_tool(language="python")

# Get descriptions of node types
get_node_types(language="python")
```

### Code Analysis Commands

These commands analyze code structure and complexity.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `get_symbols` | ✅ | Project registration | Successfully extracts symbols (functions, classes, imports) from files |
| `analyze_project` | ✅ | Project registration | Project structure analysis works with support for detailed code analysis |
| `get_dependencies` | ✅ | Project registration | Successfully identifies dependencies from import statements |
| `analyze_complexity` | ✅ | Project registration | Provides accurate code complexity metrics |
| `find_similar_code` | ⚠️ | Project registration | Execution successful but no results returned in testing |
| `find_usage` | ✅ | Project registration | Successfully finds usage of symbols across project files |

**Example Usage:**
```python
# Extract symbols from a file
get_symbols(project="my-project", file_path="src/main.py")

# Analyze project structure
analyze_project(project="my-project", scan_depth=3)

# Get dependencies for a file
get_dependencies(project="my-project", file_path="src/main.py")

# Analyze code complexity
analyze_complexity(project="my-project", file_path="src/main.py")

# Find similar code
find_similar_code(
    project="my-project",
    snippet="print('Hello, world!')",
    language="python"
)

# Find symbol usage
find_usage(project="my-project", symbol="main", language="python")
```

### Configuration Management Commands

These commands manage the service and its parse tree cache.

| Command | Status | Dependencies | Notes |
|---------|--------|--------------|-------|
| `clear_cache` | ✅ | None | Successfully clears caches at all levels (global, project, or file) |
| `configure` | ✅ | None | Successfully configures cache, log level, and other settings |
| `diagnose_config` | ✅ | None | Diagnoses issues with YAML configuration loading |

**Example Usage:**
```python
# Clear all caches
clear_cache()

# Clear cache for a specific project
clear_cache(project="my-project")

# Configure cache settings
configure(cache_enabled=True, max_file_size_mb=10, log_level="DEBUG")

# Diagnose configuration issues
diagnose_config(config_path="/path/to/config.yaml")
```

---

## Implementation Status

### Language Pack Integration

The integration of tree-sitter-language-pack is complete with comprehensive language support. All 31 languages are available and functional.

| Feature Area | Status | Test Results |
|--------------|--------|--------------|
| Language Tools | ✅ Working | All tests pass. Language tools properly report and list available languages |
| AST Analysis | ✅ Working | All tests pass. `get_ast` and `get_node_at_position` work correctly with proper node IDs and AST traversal operations |
| Search Queries | ✅ Working | All tests pass. Text search works, query building works, and tree-sitter query execution returns expected results |
| Code Analysis | ✅ Working | All tests pass. Structure and complexity analysis works, symbol extraction and dependency analysis provide useful results |

**Current Integration Capabilities:**
- AST functionality works well for retrieving and traversing trees and nodes
- Query execution and result handling work correctly
- Symbol extraction and dependency analysis provide useful results
- Project management, file operations, and search features work correctly

### Implementation Gaps

Based on the latest tests as of March 18, 2025, these are the current implementation gaps:

#### Tree Editing and Incremental Parsing
- **Status:** ⚠️ Partially Working
- Core AST functionality works
- Tree manipulation functionality requires additional implementation (see the sketch below for the flow that already works)
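
For reference, the edit-and-reparse flow that already works is sketched below. This is a minimal example built on the helpers in `src/mcp_server_tree_sitter/utils/tree_sitter_helpers.py` (the same calls exercised by `tests/test_tree_sitter_helpers.py`); the source snippet is illustrative:

```python
from mcp_server_tree_sitter.language.registry import LanguageRegistry
from mcp_server_tree_sitter.utils.tree_sitter_helpers import (
    create_edit,
    edit_tree,
    parse_source,
    parse_source_incremental,
)

parser = LanguageRegistry().get_parser("python")
source = b'print("Hello")\n'
tree = parse_source(source, parser)

# Replace "Hello" with "Howdy" (same length, single line, so the
# (row, column) points mirror the byte offsets)
pos = source.find(b"Hello")
edit = create_edit(pos, pos + 5, pos + 5, (0, pos), (0, pos + 5), (0, pos + 5))
tree = edit_tree(tree, edit)

new_source = source.replace(b"Hello", b"Howdy")
new_tree = parse_source_incremental(new_source, tree, parser)
```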

#### Tree Cursor API
- **Status:** ✅ Fully Working
- AST node traversal works correctly
- Cursor-based tree walking is efficient and reliable
- Can be extended for more advanced semantic analysis (see the cursor sketch below)
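
Cursor-based walking, as exercised by the test suite (a minimal sketch):

```python
from mcp_server_tree_sitter.language.registry import LanguageRegistry
from mcp_server_tree_sitter.utils.tree_sitter_helpers import parse_source, walk_tree

parser = LanguageRegistry().get_parser("python")
tree = parse_source(b"def f():\n    pass\n", parser)

cursor = walk_tree(tree.root_node)
if cursor.goto_first_child():          # descend from the module node
    print(cursor.node.type)            # "function_definition"
    while cursor.goto_next_sibling():  # visit remaining top-level nodes
        print(cursor.node.type)
    cursor.goto_parent()               # back to the module node
```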

#### Similar Code Detection
- **Status:** ⚠️ Partially Working
- Command executes successfully but testing did not yield results
- May require more specific snippets or fine-tuning of similarity thresholds

#### UTF-16 Support
- **Status:** ❌ Not Implemented
- Encoding detection and support is not yet available
- Will require parser improvements after core AST functionality is fixed

#### Read Callable Support
- **Status:** ❌ Not Implemented
- Custom read strategies are not yet available
- Streaming parsing for large files remains unavailable

### MCP SDK Implementation

| Feature | Status | Notes |
|---------|--------|-------|
| Application Lifecycle Management | ✅ Working | Basic lifespan support is functioning correctly |
| Image Handling | ❌ Not Implemented | No support for returning images from tools |
| MCP Context Handling | ⚠️ Partial | Basic context access works, but progress reporting not fully implemented |
| Claude Desktop Integration | ✅ Working | MCP server can be installed in Claude Desktop |
| Server Capabilities Declaration | ✅ Working | Capabilities are properly declared |

---

## Implementation Notes

This project uses a structured dependency injection (DI) pattern, but still has global singletons at its core:

1. A central `DependencyContainer` singleton that holds all shared services
2. A `global_context` object that provides a convenient interface to the container
3. API functions that access the container internally

This architecture provides three main ways to access functionality:

```python
# Option 1: API Functions (preferred for most use cases)
from mcp_server_tree_sitter.api import get_config, get_language_registry

config = get_config()
languages = get_language_registry().list_available_languages()

# Option 2: Direct Container Access
from mcp_server_tree_sitter.di import get_container

container = get_container()
project_registry = container.project_registry
tree_cache = container.tree_cache

# Option 3: Global Context
from mcp_server_tree_sitter.context import global_context

config = global_context.get_config()
result = global_context.register_project("/path/to/project")
```

The dependency injection approach helps make the code more testable and maintainable, even though it still uses singletons internally.
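
For example, the test suite's `temp_config` helper (see `tests/test_helpers.py`) leans on the container to apply settings for the duration of a block and restore them afterwards. A minimal sketch, assuming a project named `my-project` is already registered:

```python
from tests.test_helpers import get_ast, temp_config

# Disable the tree cache and raise logging verbosity only for this block;
# temp_config restores the previous values on exit.
with temp_config(**{"cache.enabled": False, "log_level": "DEBUG"}):
    ast = get_ast(project="my-project", path="src/main.py", max_depth=3)
```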

---

## Testing Guidelines

When testing the MCP Tree-sitter server, use this structured approach (a combined smoke-test sketch follows the list):

1. **Project Setup**
   - Register a project with `register_project_tool`
   - Verify registration with `list_projects_tool`

2. **Basic File Operations**
   - Test `list_files` to ensure project access
   - Test `get_file` to verify content retrieval
   - Test `get_file_metadata` to check file information

3. **Language Parser Verification**
   - Test `check_language_available` to verify specific language support
   - Use `list_languages` to see all available languages

4. **Feature Testing**
   - Test AST operations with `get_ast` to ensure proper node IDs and structure
   - Test query execution with `run_query` to verify proper result capture
   - Test symbol extraction with `get_symbols` to verify proper function, class, and import detection
   - Test dependency analysis with `get_dependencies` to verify proper import detection
   - Test complexity analysis with `analyze_complexity` to verify metrics are being calculated correctly
   - Test usage finding with `find_usage` to verify proper symbol reference detection

5. **Test Outcomes**
   - All 185 tests now pass successfully
   - No diagnostic errors reported
   - Core functionality works reliably across all test cases
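
Putting steps 1–4 together, a minimal smoke test might look like the following sketch. It uses the wrapper functions from `tests/test_helpers.py`; the project path and name are placeholders:

```python
from tests.test_helpers import (
    check_language_available,
    get_ast,
    get_symbols,
    list_files,
    register_project_tool,
    run_query,
)

# 1. Project setup (path is a placeholder)
register_project_tool(path="/path/to/project", name="smoke-test")

# 2. Basic file operations
files = list_files(project="smoke-test", pattern="**/*.py")
assert files, "Expected at least one Python file"

# 3. Language parser verification
assert check_language_available(language="python")["status"] == "success"

# 4. Feature testing: AST, symbols, and a simple query
ast = get_ast(project="smoke-test", path=files[0], max_depth=3)
assert "tree" in ast

symbols = get_symbols(project="smoke-test", file_path=files[0])
matches = run_query(
    project="smoke-test",
    query="(function_definition name: (identifier) @name)",
    file_path=files[0],
    language="python",
)
```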

---

## Implementation Progress

Based on the test results as of March 18, 2025, all critical functionality is now working:

1. **✅ Tree-Sitter Query Result Handling**
   - Query result handling works correctly
   - Queries execute and return proper results with correct capture processing

2. **✅ Tree Cursor Functionality**
   - Tree cursor-based traversal is working correctly
   - Efficient navigation and analysis of ASTs is now possible

3. **✅ AST Node ID Generation**
   - AST nodes are correctly assigned unique IDs
   - Node traversal and reference works reliably

4. **✅ Symbol Extraction**
   - Symbol extraction correctly identifies functions, classes, and imports
   - Location information is accurate

5. **✅ Dependency Analysis**
   - Dependency analysis correctly identifies imports and references
   - Properly handles different import styles

6. **✅ Code Complexity Analysis**
   - Complexity metrics are calculated correctly
   - Line counts, cyclomatic complexity, and other metrics are accurate

7. **⚠️ Similar Code Detection**
   - Command completes execution but testing did not yield results
   - May need further investigation with more appropriate test cases

8. **Future Work: Complete MCP Context Progress Reporting**
   - Add progress reporting for long-running operations to improve user experience

---

This feature matrix reflects test results as of March 18, 2025. All core functionality is now working correctly, with only minor issues in similar code detection. The project is fully operational with all 185 tests passing successfully.

```

--------------------------------------------------------------------------------
/tests/test_helpers.py:
--------------------------------------------------------------------------------

```python
"""Helper functions for tests using the new dependency injection pattern."""

import logging
from contextlib import contextmanager
from typing import Any, Dict, List, Optional

from mcp_server_tree_sitter.api import (
    clear_cache as api_clear_cache,
)
from mcp_server_tree_sitter.api import (
    get_config,
    get_language_registry,
    get_project_registry,
    get_tree_cache,
)
from mcp_server_tree_sitter.api import (
    list_projects as api_list_projects,
)
from mcp_server_tree_sitter.api import (
    register_project as api_register_project,
)
from mcp_server_tree_sitter.api import (
    remove_project as api_remove_project,
)
from mcp_server_tree_sitter.di import get_container
from mcp_server_tree_sitter.language.query_templates import (
    get_query_template,
    list_query_templates,
)
from mcp_server_tree_sitter.tools.analysis import (
    analyze_code_complexity,
    analyze_project_structure,
    extract_symbols,
    find_dependencies,
)
from mcp_server_tree_sitter.tools.ast_operations import find_node_at_position as ast_find_node_at_position
from mcp_server_tree_sitter.tools.ast_operations import get_file_ast as ast_get_file_ast
from mcp_server_tree_sitter.tools.file_operations import (
    get_file_content,
    get_file_info,
    list_project_files,
)
from mcp_server_tree_sitter.tools.query_builder import (
    adapt_query_for_language,
    build_compound_query,
    describe_node_types,
)
from mcp_server_tree_sitter.tools.search import query_code, search_text


@contextmanager
def temp_config(**kwargs):
    """
    Context manager for temporarily changing configuration settings.

    Args:
        **kwargs: Configuration values to change temporarily
    """
    # Get container and save original values
    container = get_container()
    config_manager = container.config_manager
    original_values = {}

    # Apply configuration changes
    for key, value in kwargs.items():
        # For tree_cache settings that need to be applied directly
        if key == "cache.enabled":
            original_values["tree_cache.enabled"] = container.tree_cache.enabled
            container.tree_cache.set_enabled(value)

        if key == "cache.max_size_mb":
            original_values["tree_cache.max_size_mb"] = container.tree_cache._get_max_size_mb()
            container.tree_cache.set_max_size_mb(value)

        # Handle log level specially
        if key == "log_level":
            # Save the original logger level
            root_logger = logging.getLogger("mcp_server_tree_sitter")
            original_values["root_logger_level"] = root_logger.level

            # Apply the new level directly
            log_level_value = getattr(logging, value, None)
            if log_level_value is not None:
                root_logger.setLevel(log_level_value)
                logging.debug(f"Set root logger to {value} in temp_config")

        # Update config manager values
        config_manager.update_value(key, value)

    try:
        yield
    finally:
        # Restore original values
        for key, value in original_values.items():
            if key == "tree_cache.enabled":
                container.tree_cache.set_enabled(value)
            elif key == "tree_cache.max_size_mb":
                container.tree_cache.set_max_size_mb(value)
            elif key == "root_logger_level":
                # Restore original logger level
                root_logger = logging.getLogger("mcp_server_tree_sitter")
                root_logger.setLevel(value)
                logging.debug(f"Restored root logger level to {value} in temp_config")

        # Re-apply original config values to config manager
        current_config = container.get_config()
        for key, _value in kwargs.items():
            parts = key.split(".")
            if len(parts) == 2:
                section, setting = parts
                if hasattr(current_config, section):
                    section_obj = getattr(current_config, section)
                    if hasattr(section_obj, setting):
                        # Get the original value from container's config
                        original_config = container.config_manager.get_config()
                        original_section = getattr(original_config, section, None)
                        if original_section and hasattr(original_section, setting):
                            original_value = getattr(original_section, setting)
                            config_manager.update_value(key, original_value)
            elif hasattr(current_config, key):
                # Handle top-level attributes like log_level
                original_config = container.config_manager.get_config()
                if hasattr(original_config, key):
                    original_value = getattr(original_config, key)
                    config_manager.update_value(key, original_value)


# Project Management Tools
def register_project_tool(path: str, name: Optional[str] = None, description: Optional[str] = None) -> Dict[str, Any]:
    """Register a project directory for code exploration."""
    return api_register_project(path, name, description)


def list_projects_tool() -> List[Dict[str, Any]]:
    """List all registered projects."""
    return api_list_projects()


def remove_project_tool(name: str) -> Dict[str, str]:
    """Remove a registered project."""
    return api_remove_project(name)


# Language Tools
def list_languages() -> Dict[str, Any]:
    """List available languages."""
    language_registry = get_language_registry()
    available = language_registry.list_available_languages()
    return {
        "available": available,
        "installable": [],  # No separate installation needed with language-pack
    }


def check_language_available(language: str) -> Dict[str, str]:
    """Check if a tree-sitter language parser is available."""
    language_registry = get_language_registry()
    if language_registry.is_language_available(language):
        return {
            "status": "success",
            "message": f"Language '{language}' is available via tree-sitter-language-pack",
        }
    else:
        return {
            "status": "error",
            "message": f"Language '{language}' is not available",
        }


# File Operations
def list_files(
    project: str,
    pattern: Optional[str] = None,
    max_depth: Optional[int] = None,
    extensions: Optional[List[str]] = None,
) -> List[str]:
    """List files in a project."""
    project_registry = get_project_registry()
    return list_project_files(project_registry.get_project(project), pattern, max_depth, extensions)


def get_file(project: str, path: str, max_lines: Optional[int] = None, start_line: int = 0) -> str:
    """Get content of a file."""
    project_registry = get_project_registry()
    return get_file_content(project_registry.get_project(project), path, max_lines=max_lines, start_line=start_line)


def get_file_metadata(project: str, path: str) -> Dict[str, Any]:
    """Get metadata for a file."""
    project_registry = get_project_registry()
    return get_file_info(project_registry.get_project(project), path)


# AST Analysis
def get_ast(project: str, path: str, max_depth: Optional[int] = None, include_text: bool = True) -> Dict[str, Any]:
    """Get abstract syntax tree for a file."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()
    tree_cache = get_tree_cache()
    config = get_config()

    depth = max_depth or config.language.default_max_depth

    return ast_get_file_ast(
        project_registry.get_project(project),
        path,
        language_registry,
        tree_cache,
        max_depth=depth,
        include_text=include_text,
    )


def get_node_at_position(project: str, path: str, row: int, column: int) -> Optional[Dict[str, Any]]:
    """Find the AST node at a specific position."""
    from mcp_server_tree_sitter.models.ast import node_to_dict

    project_registry = get_project_registry()
    project_obj = project_registry.get_project(project)
    file_path = project_obj.get_file_path(path)

    language_registry = get_language_registry()
    language = language_registry.language_for_file(path)
    if not language:
        raise ValueError(f"Could not detect language for {path}")

    from mcp_server_tree_sitter.tools.ast_operations import parse_file

    tree, source_bytes = parse_file(file_path, language, language_registry, get_tree_cache())

    node = ast_find_node_at_position(tree.root_node, row, column)
    if node:
        return node_to_dict(node, source_bytes, max_depth=2)

    return None


# Search and Query Tools
def find_text(
    project: str,
    pattern: str,
    file_pattern: Optional[str] = None,
    max_results: int = 100,
    case_sensitive: bool = False,
    whole_word: bool = False,
    use_regex: bool = False,
    context_lines: int = 2,
) -> List[Dict[str, Any]]:
    """Search for text pattern in project files."""
    project_registry = get_project_registry()
    return search_text(
        project_registry.get_project(project),
        pattern,
        file_pattern,
        max_results,
        case_sensitive,
        whole_word,
        use_regex,
        context_lines,
    )


def run_query(
    project: str,
    query: str,
    file_path: Optional[str] = None,
    language: Optional[str] = None,
    max_results: int = 100,
) -> List[Dict[str, Any]]:
    """Run a tree-sitter query on project files."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()
    tree_cache = get_tree_cache()

    return query_code(
        project_registry.get_project(project),
        query,
        language_registry,
        tree_cache,
        file_path,
        language,
        max_results,
    )


def get_query_template_tool(language: str, template_name: str) -> Dict[str, Any]:
    """Get a predefined tree-sitter query template."""
    template = get_query_template(language, template_name)
    if not template:
        raise ValueError(f"No template '{template_name}' for language '{language}'")

    return {
        "language": language,
        "name": template_name,
        "query": template,
    }


def list_query_templates_tool(language: Optional[str] = None) -> Dict[str, Any]:
    """List available query templates."""
    return list_query_templates(language)


def build_query(language: str, patterns: List[str], combine: str = "or") -> Dict[str, str]:
    """Build a tree-sitter query from templates or patterns."""
    query = build_compound_query(language, patterns, combine)
    return {
        "language": language,
        "query": query,
    }


def adapt_query(query: str, from_language: str, to_language: str) -> Dict[str, str]:
    """Adapt a query from one language to another."""
    adapted = adapt_query_for_language(query, from_language, to_language)
    return {
        "original_language": from_language,
        "target_language": to_language,
        "original_query": query,
        "adapted_query": adapted,
    }


def get_node_types(language: str) -> Dict[str, str]:
    """Get descriptions of common node types for a language."""
    return describe_node_types(language)


# Code Analysis Tools
def get_symbols(
    project: str, file_path: str, symbol_types: Optional[List[str]] = None
) -> Dict[str, List[Dict[str, Any]]]:
    """Extract symbols from a file."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()

    return extract_symbols(project_registry.get_project(project), file_path, language_registry, symbol_types)


def analyze_project(project: str, scan_depth: int = 3, ctx: Optional[Any] = None) -> Dict[str, Any]:
    """Analyze overall project structure."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()

    return analyze_project_structure(project_registry.get_project(project), language_registry, scan_depth, ctx)


def get_dependencies(project: str, file_path: str) -> Dict[str, List[str]]:
    """Find dependencies of a file."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()

    return find_dependencies(
        project_registry.get_project(project),
        file_path,
        language_registry,
    )


def analyze_complexity(project: str, file_path: str) -> Dict[str, Any]:
    """Analyze code complexity."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()

    return analyze_code_complexity(
        project_registry.get_project(project),
        file_path,
        language_registry,
    )


def find_similar_code(
    project: str,
    snippet: str,
    language: Optional[str] = None,
    threshold: float = 0.8,
    max_results: int = 10,
) -> List[Dict[str, Any]]:
    """Find similar code to a snippet."""
    # This is a simple implementation that uses text search
    project_registry = get_project_registry()

    # Map language names to file extensions
    extension_map = {
        "python": "py",
        "javascript": "js",
        "typescript": "ts",
        "rust": "rs",
        "go": "go",
        "java": "java",
        "c": "c",
        "cpp": "cpp",
        "ruby": "rb",
        "swift": "swift",
        "kotlin": "kt",
    }

    # Get the appropriate file extension for the language
    extension = extension_map.get(language, language) if language else None
    file_pattern = f"**/*.{extension}" if extension else None

    return search_text(
        project_registry.get_project(project),
        snippet,
        file_pattern=file_pattern,
        max_results=max_results,
    )


def find_usage(
    project: str,
    symbol: str,
    file_path: Optional[str] = None,
    language: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Find usage of a symbol."""
    project_registry = get_project_registry()
    language_registry = get_language_registry()
    tree_cache = get_tree_cache()

    # Detect language if not provided but file_path is
    if not language and file_path:
        language = language_registry.language_for_file(file_path)

    if not language:
        raise ValueError("Either language or file_path must be provided")

    # Build a query to find references to the symbol
    query = f"""
    (
      (identifier) @reference
      (#eq? @reference "{symbol}")
    )
    """

    return query_code(project_registry.get_project(project), query, language_registry, tree_cache, file_path, language)


# Cache Management
def clear_cache(project: Optional[str] = None, file_path: Optional[str] = None) -> Dict[str, str]:
    """Clear the parse tree cache."""
    return api_clear_cache(project, file_path)


# Server configuration
def configure(
    config_path: Optional[str] = None,
    cache_enabled: Optional[bool] = None,
    max_file_size_mb: Optional[int] = None,
    log_level: Optional[str] = None,
) -> Dict[str, Any]:
    """Configure the server using the DI container."""
    container = get_container()
    config_manager = container.config_manager

    # Load config if path provided
    if config_path:
        logging.info(f"Configuring server with YAML config from: {config_path}")
        config_manager.load_from_file(config_path)

    # Update specific settings if provided
    if cache_enabled is not None:
        logging.info(f"Setting cache.enabled to {cache_enabled}")
        config_manager.update_value("cache.enabled", cache_enabled)
        container.tree_cache.set_enabled(cache_enabled)

    if max_file_size_mb is not None:
        logging.info(f"Setting security.max_file_size_mb to {max_file_size_mb}")
        config_manager.update_value("security.max_file_size_mb", max_file_size_mb)

    if log_level is not None:
        logging.info(f"Setting log_level to {log_level}")
        config_manager.update_value("log_level", log_level)

        # Apply log level directly to loggers (accept case-insensitive names)
        log_level_value = getattr(logging, log_level.upper(), None)
        if log_level_value is not None:
            # Set the root logger for the package
            root_logger = logging.getLogger("mcp_server_tree_sitter")
            root_logger.setLevel(log_level_value)
            logging.info(f"Applied log level {log_level} to mcp_server_tree_sitter loggers")

    # Return current config as dict
    return config_manager.to_dict()


def configure_with_context(
    context: Any,
    config_path: Optional[str] = None,
    cache_enabled: Optional[bool] = None,
    max_file_size_mb: Optional[int] = None,
    log_level: Optional[str] = None,
) -> tuple[Dict[str, Any], Any]:
    """
    Configure with explicit context - compatibility function.

    In new DI model, context is replaced by container. This is a compatibility
    function that accepts a context parameter but uses the container internally.
    """
    # Just delegate to the regular configure function and return current config
    result = configure(config_path, cache_enabled, max_file_size_mb, log_level)
    return result, get_container().get_config()

```
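
A minimal usage sketch for the helpers above (hedged: `my-project` and the
symbol name are placeholders, and the project must already be registered):

```python
# Illustrative only; assumes a project registered under "my-project".
settings = configure(cache_enabled=True, max_file_size_mb=10, log_level="DEBUG")
print(settings["cache"]["enabled"])  # True

usages = find_usage("my-project", "process_data", language="python")
for match in usages:
    print(match)
```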

--------------------------------------------------------------------------------
/src/mcp_server_tree_sitter/utils/tree_sitter_helpers.py:
--------------------------------------------------------------------------------

```python
"""Helper functions for tree-sitter operations.

This module provides wrappers and utility functions for common tree-sitter operations
to ensure type safety and consistent handling of tree-sitter objects.
"""

from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast

# Import tree_cache at runtime as needed to avoid circular imports
from ..utils.file_io import read_binary_file
from ..utils.tree_sitter_types import (
    Language,
    Node,
    Parser,
    Tree,
    TreeCursor,
    ensure_cursor,
    ensure_language,
    ensure_node,
    ensure_parser,
    ensure_tree,
)

T = TypeVar("T")


def create_parser(language_obj: Any) -> Parser:
    """
    Create a parser configured for a specific language.

    Args:
        language_obj: Language object

    Returns:
        Configured Parser
    """
    parser = Parser()
    safe_language = ensure_language(language_obj)

    # Support both binding generations: older bindings expose set_language(),
    # newer ones expose a `language` property instead.
    try:
        parser.set_language(safe_language)  # type: ignore
    except AttributeError:
        parser.language = safe_language  # type: ignore

    return ensure_parser(parser)


def parse_source(source: bytes, parser: Union[Parser, Any]) -> Tree:
    """
    Parse source code using a configured parser.

    Args:
        source: Source code as bytes
        parser: Configured Parser object

    Returns:
        Parsed Tree
    """
    safe_parser = ensure_parser(parser)
    tree = safe_parser.parse(source)
    return ensure_tree(tree)


def parse_source_incremental(source: bytes, old_tree: Optional[Tree], parser: Parser) -> Tree:
    """
    Parse source code incrementally using a configured parser.

    Args:
        source: Source code as bytes
        old_tree: Previous tree for incremental parsing
        parser: Configured Parser object

    Returns:
        Parsed Tree
    """
    safe_parser = ensure_parser(parser)
    tree = safe_parser.parse(source, old_tree)
    return ensure_tree(tree)


def edit_tree(
    tree: Tree,
    edit_dict_or_start_byte: Union[Dict[str, Any], int],
    old_end_byte: Optional[int] = None,
    new_end_byte: Optional[int] = None,
    start_point: Optional[Tuple[int, int]] = None,
    old_end_point: Optional[Tuple[int, int]] = None,
    new_end_point: Optional[Tuple[int, int]] = None,
) -> Tree:
    """
    Edit a syntax tree to reflect source code changes.

    Args:
        tree: Tree to edit
        edit_dict_or_start_byte: Edit dictionary or start byte of the edit
        old_end_byte: End byte of the old text (if not using edit dict)
        new_end_byte: End byte of the new text (if not using edit dict)
        start_point: Start point (row, column) of the edit (if not using edit dict)
        old_end_point: End point of the old text (if not using edit dict)
        new_end_point: End point of the new text (if not using edit dict)

    Returns:
        Edited tree
    """
    safe_tree = ensure_tree(tree)

    # Handle both dictionary and individual parameters
    if isinstance(edit_dict_or_start_byte, dict):
        edit_dict = edit_dict_or_start_byte
        safe_tree.edit(
            start_byte=edit_dict["start_byte"],
            old_end_byte=edit_dict["old_end_byte"],
            new_end_byte=edit_dict["new_end_byte"],
            start_point=edit_dict["start_point"],
            old_end_point=edit_dict["old_end_point"],
            new_end_point=edit_dict["new_end_point"],
        )
    else:
        # Using individual parameters
        # Tree-sitter expects non-None values for these parameters
        _old_end_byte = 0 if old_end_byte is None else old_end_byte
        _new_end_byte = 0 if new_end_byte is None else new_end_byte
        _start_point = (0, 0) if start_point is None else start_point
        _old_end_point = (0, 0) if old_end_point is None else old_end_point
        _new_end_point = (0, 0) if new_end_point is None else new_end_point

        safe_tree.edit(
            start_byte=edit_dict_or_start_byte,
            old_end_byte=_old_end_byte,
            new_end_byte=_new_end_byte,
            start_point=_start_point,
            old_end_point=_old_end_point,
            new_end_point=_new_end_point,
        )
    return safe_tree


def get_changed_ranges(old_tree: Tree, new_tree: Tree) -> List[Tuple[int, int]]:
    """
    Get changed ranges between two syntax trees.

    Args:
        old_tree: Old syntax tree
        new_tree: New syntax tree

    Returns:
        List of changed ranges as tuples of (start_byte, end_byte)
    """
    safe_old_tree = ensure_tree(old_tree)
    safe_new_tree = ensure_tree(new_tree)

    # Note: This is a simplified implementation, as the tree_sitter Python
    # binding may not expose changed_ranges directly. Where available, the
    # real call would be:
    # ranges = old_tree.changed_ranges(new_tree)

    # For now, return a basic comparison at the root level
    old_root = safe_old_tree.root_node
    new_root = safe_new_tree.root_node

    if old_root.start_byte != new_root.start_byte or old_root.end_byte != new_root.end_byte:
        # Return the entire tree as changed
        return [(new_root.start_byte, new_root.end_byte)]

    return []


def parse_file(
    file_path: Path, parser_or_language: Union[Parser, str], registry: Optional[Any] = None
) -> Tuple[Tree, bytes]:
    """
    Parse a file using a configured parser.

    Args:
        file_path: Path to the file
        parser_or_language: Configured Parser object or language string
        registry: Language registry (needed for compatibility with old API)

    Returns:
        Tuple of (Tree, source_bytes)
    """
    source_bytes = read_binary_file(file_path)

    # If we received a parser directly, use it
    if hasattr(parser_or_language, "parse"):
        parser = parser_or_language
        tree = parse_source(source_bytes, parser)
        return cast(Tuple[Tree, bytes], (tree, source_bytes))

    # If we received a language string and registry, get the parser
    elif isinstance(parser_or_language, str) and registry is not None:
        try:
            parser = registry.get_parser(parser_or_language)
            tree = parse_source(source_bytes, parser)
            return cast(Tuple[Tree, bytes], (tree, source_bytes))
        except Exception as e:
            raise ValueError(f"Could not get parser for language '{parser_or_language}': {e}") from e

    # Invalid parameters (e.g., a language string without a registry)
    raise ValueError(f"Invalid parser or language: {parser_or_language}")


def get_node_text(node: Node, source_bytes: bytes, decode: bool = True) -> Union[str, bytes]:
    """
    Safely get text for a node from source bytes.

    Args:
        node: Node object
        source_bytes: Source code as bytes
        decode: Whether to decode bytes to string (default: True)

    Returns:
        Text for the node as string or bytes
    """
    safe_node = ensure_node(node)
    try:
        node_bytes = source_bytes[safe_node.start_byte : safe_node.end_byte]
        if decode:
            try:
                return node_bytes.decode("utf-8", errors="replace")
            except (UnicodeDecodeError, AttributeError):
                return str(node_bytes)
        return node_bytes
    except (IndexError, ValueError):
        return "" if decode else b""


def walk_tree(node: Node) -> TreeCursor:
    """
    Get a cursor for walking a tree from a node.

    Args:
        node: Node to start from

    Returns:
        Tree cursor
    """
    safe_node = ensure_node(node)
    cursor = safe_node.walk()
    return ensure_cursor(cursor)


def cursor_walk_tree(node: Node, visit_fn: Callable[[Optional[Node], Optional[str], int], bool]) -> None:
    """
    Walk a tree using cursor for efficiency.

    Args:
        node: Root node to start from
        visit_fn: Function called for each node, receives (node, field_name, depth)
                  Return True to continue traversal, False to skip children
    """
    cursor = walk_tree(node)
    field_name = None
    depth = 0

    if not visit_fn(cursor.node, field_name, depth):
        return

    if cursor.goto_first_child():
        depth += 1

        while True:
            # Get field name if available. Note: in the upstream Python
            # binding, children_by_field_name is a method rather than a dict,
            # so this defensive probe typically yields None.
            field_name = None
            if cursor.node and cursor.node.parent:
                parent_field_names = getattr(cursor.node.parent, "children_by_field_name", {})
                if hasattr(parent_field_names, "items"):
                    for name, nodes in parent_field_names.items():
                        if cursor.node in nodes:
                            field_name = name
                            break

            if visit_fn(cursor.node, field_name, depth):
                # Visit children
                if cursor.goto_first_child():
                    depth += 1
                    continue

            # No children or children skipped, try siblings
            if cursor.goto_next_sibling():
                continue

            # No more siblings, go up
            while depth > 0:
                cursor.goto_parent()
                depth -= 1

                if cursor.goto_next_sibling():
                    break

            # If we've returned to the root, we're done
            if depth == 0:
                break


def collect_with_cursor(
    node: Node,
    collector_fn: Callable[[Optional[Node], Optional[str], int], Optional[T]],
) -> List[T]:
    """
    Collect items from a tree using cursor traversal.

    Args:
        node: Root node to start from
        collector_fn: Function that returns an item to collect or None to skip
                     Receives (node, field_name, depth)

    Returns:
        List of collected items
    """
    items: List[T] = []

    def visit(node: Optional[Node], field_name: Optional[str], depth: int) -> bool:
        if node is None:
            return False
        item = collector_fn(node, field_name, depth)
        if item is not None:
            items.append(item)
        return True  # Continue traversal

    cursor_walk_tree(node, visit)
    return items


def find_nodes_by_type(root_node: Node, node_type: str) -> List[Node]:
    """
    Find all nodes of a specific type in a tree.

    Args:
        root_node: Root node to search from
        node_type: Type of node to find

    Returns:
        List of matching nodes
    """

    def collector(node: Optional[Node], _field_name: Optional[str], _depth: int) -> Optional[Node]:
        if node is None:
            return None
        if node.type == node_type:
            return node
        return None

    return collect_with_cursor(root_node, collector)


def get_node_descendants(node: Optional[Node], max_depth: Optional[int] = None) -> List[Node]:
    """
    Get all descendants of a node.

    Args:
        node: Node to get descendants for
        max_depth: Maximum depth to traverse

    Returns:
        List of descendant nodes
    """
    descendants: List[Node] = []

    if node is None:
        return descendants

    def visit(node: Optional[Node], _field_name: Optional[str], depth: int) -> bool:
        if node is None:
            return False
        if max_depth is not None and depth > max_depth:
            return False  # Skip children

        if depth > 0:  # Skip the root node
            descendants.append(node)

        return True  # Continue traversal

    cursor_walk_tree(node, visit)
    return descendants


def parse_with_cached_tree(
    file_path: Path, language: str, language_obj: Language, tree_cache: Any = None
) -> Tuple[Tree, bytes]:
    """
    Parse a file with tree caching.

    Args:
        file_path: Path to the file
        language: Language identifier
        language_obj: Language object
        tree_cache: Tree cache instance (optional, falls back to container if not provided)

    Returns:
        Tuple of (Tree, source_bytes)
    """
    # Get tree cache from container if not provided
    if tree_cache is None:
        from ..di import get_container

        tree_cache = get_container().tree_cache

    # Check if we have a cached tree
    cached = tree_cache.get(file_path, language)
    if cached:
        tree, source_bytes = cached
        # Ensure tree is properly typed
        return ensure_tree(tree), source_bytes

    # Parse the file using our own parser to avoid registry complications
    parser = create_parser(language_obj)
    source_bytes = read_binary_file(file_path)
    tree = parse_source(source_bytes, parser)

    # Cache the tree
    tree_cache.put(file_path, language, tree, source_bytes)

    return cast(Tuple[Tree, bytes], (tree, source_bytes))


def update_cached_tree(
    file_path: Path,
    language: str,
    language_obj: Language,
    start_byte: int,
    old_end_byte: int,
    new_end_byte: int,
    start_point: Tuple[int, int],
    old_end_point: Tuple[int, int],
    new_end_point: Tuple[int, int],
    tree_cache: Any = None,
) -> Optional[Tuple[Tree, bytes]]:
    """
    Update a cached tree with edit operation.

    Args:
        file_path: Path to the source file
        language: Language identifier
        language_obj: Language object
        start_byte, old_end_byte, new_end_byte: Byte positions of edit
        start_point, old_end_point, new_end_point: Row/column positions of edit
        tree_cache: Tree cache instance (optional, falls back to container if not provided)

    Returns:
        Updated (tree, source_bytes) if successful, None otherwise
    """
    # Get tree cache from container if not provided
    if tree_cache is None:
        from ..di import get_container

        tree_cache = get_container().tree_cache

    # Check if we have a cached tree
    cached = tree_cache.get(file_path, language)
    if not cached:
        return None

    old_tree, old_source = cached

    try:
        # Apply edit to the tree
        edit_dict = {
            "start_byte": start_byte,
            "old_end_byte": old_end_byte,
            "new_end_byte": new_end_byte,
            "start_point": start_point,
            "old_end_point": old_end_point,
            "new_end_point": new_end_point,
        }
        edit_tree(old_tree, edit_dict)

        # Read updated source
        with open(file_path, "rb") as f:
            new_source = f.read()

        # Parse incrementally
        parser = create_parser(language_obj)
        new_tree = parse_source_incremental(new_source, old_tree, parser)

        # Update cache
        tree_cache.put(file_path, language, new_tree, new_source)

        return cast(Tuple[Tree, bytes], (new_tree, new_source))
    except Exception:
        # If incremental parsing fails, fall back to full parse
        return parse_with_cached_tree(file_path, language, language_obj, tree_cache=tree_cache)


# Additional helper functions required by tests


def create_edit(
    start_byte: int,
    old_end_byte: int,
    new_end_byte: int,
    start_point: Tuple[int, int],
    old_end_point: Tuple[int, int],
    new_end_point: Tuple[int, int],
) -> Dict[str, Any]:
    """
    Create an edit dictionary for modifying trees.

    Args:
        start_byte: Start byte of the edit
        old_end_byte: End byte of the old text
        new_end_byte: End byte of the new text
        start_point: Start point (row, column) of the edit
        old_end_point: End point of the old text
        new_end_point: End point of the new text

    Returns:
        Edit dictionary with all parameters
    """
    return {
        "start_byte": start_byte,
        "old_end_byte": old_end_byte,
        "new_end_byte": new_end_byte,
        "start_point": start_point,
        "old_end_point": old_end_point,
        "new_end_point": new_end_point,
    }
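
# Illustrative sketch (hypothetical positions): replacing a 3-byte token at
# byte offset 10 on row 0 with a 6-byte token would be expressed as:
#   edit = create_edit(10, 13, 16, (0, 10), (0, 13), (0, 16))
#   edit_tree(tree, edit)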


def parse_file_with_detection(file_path: Path, language: Optional[str], registry: Any) -> Tuple[Tree, bytes]:
    """
    Parse a file with language detection.

    Args:
        file_path: Path to the file
        language: Optional language identifier (detected from extension if None)
        registry: Language registry for getting parsers

    Returns:
        Tuple of (Tree, source_bytes)
    """
    if not file_path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")

    # Auto-detect language if not provided
    if language is None:
        ext = file_path.suffix.lower()
        if ext == ".py":
            language = "python"
        elif ext in [".js", ".jsx"]:
            language = "javascript"
        elif ext in [".ts", ".tsx"]:
            language = "typescript"
        elif ext in [".java"]:
            language = "java"
        elif ext in [".c", ".h"]:
            language = "c"
        elif ext in [".cpp", ".hpp", ".cc", ".hh"]:
            language = "cpp"
        elif ext in [".go"]:
            language = "go"
        elif ext in [".rs"]:
            language = "rust"
        elif ext in [".rb"]:
            language = "ruby"
        elif ext in [".php"]:
            language = "php"
        else:
            raise ValueError(f"Could not detect language for file: {file_path}")

    if language is None:
        # Unreachable at runtime (the else branch above raises), but it
        # narrows Optional[str] to str for type checkers
        raise ValueError(f"Language required for parsing file: {file_path}")

    # Get parser for language
    try:
        parser = registry.get_parser(language)
    except Exception as e:
        raise ValueError(f"Could not get parser for language '{language}': {e}") from e

    # Read file and parse
    source_bytes = read_binary_file(file_path)
    tree = parse_source(source_bytes, parser)

    return cast(Tuple[Tree, bytes], (tree, source_bytes))


def parse_file_incremental(file_path: Path, old_tree: Tree, language: str, registry: Any) -> Tuple[Tree, bytes]:
    """
    Parse a file incrementally using a previous tree.

    Args:
        file_path: Path to the file
        old_tree: Previous tree for incremental parsing
        language: Language identifier
        registry: Language registry for getting parsers

    Returns:
        Tuple of (Tree, source_bytes)
    """
    if not file_path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")

    # Get parser for language
    parser = registry.get_parser(language)

    # Read file and parse incrementally
    source_bytes = read_binary_file(file_path)
    tree = parse_source_incremental(source_bytes, old_tree, parser)

    return cast(Tuple[Tree, bytes], (tree, source_bytes))


def get_node_with_text(node: Node, source_bytes: bytes, text: bytes) -> Optional[Node]:
    """
    Find a node containing specific text.

    Args:
        node: Root node to search from
        source_bytes: Source code as bytes
        text: Text to search for (as bytes)

    Returns:
        Node containing the text or None if not found
    """
    # Ensure we get bytes back from get_node_text
    if text in get_node_text(node, source_bytes, decode=False):
        # Check if any child contains the text
        for child in node.children:
            result = get_node_with_text(child, source_bytes, text)
            if result is not None:
                return result
        # If no child contains the text, return this node
        return node
    return None


def is_node_inside(pos_or_node: Union[Node, Tuple[int, int]], container_node: Node) -> bool:
    """
    Check if a node or position is inside another node.

    Args:
        pos_or_node: Node or position (row, column) to check
        container_node: Node that might contain the other node/position

    Returns:
        True if the node/position is inside the container node, False otherwise
    """
    # Handle position case
    if isinstance(pos_or_node, tuple):
        row, column = pos_or_node
        start_row, start_col = container_node.start_point
        end_row, end_col = container_node.end_point

        # Check if position is within node boundaries
        if row < start_row or row > end_row:
            return False
        if row == start_row and column < start_col:
            return False
        if row == end_row and column > end_col:
            return False
        return True

    # Handle node case
    node = pos_or_node
    if node == container_node:
        return True  # Node is inside itself

    # Check if node's boundaries are within container's boundaries
    return is_node_inside(node.start_point, container_node) and is_node_inside(node.end_point, container_node)


def find_all_descendants(node: Node, max_depth: Optional[int] = None) -> List[Node]:
    """
    Find all descendant nodes of a given node.

    Args:
        node: Root node to search from
        max_depth: Maximum depth to search

    Returns:
        List of all descendant nodes
    """
    return get_node_descendants(node, max_depth)

```
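
A hedged sketch of the parse/inspect cycle these helpers support, assuming
tree-sitter-language-pack's `get_language()` helper is available (as used
elsewhere in this project):

```python
from tree_sitter_language_pack import get_language

from mcp_server_tree_sitter.utils.tree_sitter_helpers import (
    create_parser,
    find_nodes_by_type,
    get_node_text,
    parse_source,
)

source = b"def add(a, b):\n    return a + b\n"
parser = create_parser(get_language("python"))
tree = parse_source(source, parser)
for fn in find_nodes_by_type(tree.root_node, "function_definition"):
    print(get_node_text(fn, source))  # prints the full function text
```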

--------------------------------------------------------------------------------
/src/mcp_server_tree_sitter/config.py:
--------------------------------------------------------------------------------

```python
"""Configuration management with explicit manager class.

Environment variables can be used to override configuration settings with the following format:
- MCP_TS_SECTION_SETTING - For section settings (e.g., MCP_TS_CACHE_MAX_SIZE_MB)
- MCP_TS_SETTING - For top-level settings (e.g., MCP_TS_LOG_LEVEL)

The precedence order for configuration is:
1. Environment variables (highest)
2. Explicit updates via update_value()
3. YAML configuration from file
4. Default values (lowest)
"""

import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import yaml
from pydantic import BaseModel, Field

# Import logging from bootstrap package
from .bootstrap import get_logger, update_log_levels

logger = get_logger(__name__)


class CacheConfig(BaseModel):
    """Configuration for caching behavior."""

    enabled: bool = True
    max_size_mb: int = 100
    ttl_seconds: int = 300  # Time-to-live for cached items


class SecurityConfig(BaseModel):
    """Security settings."""

    max_file_size_mb: int = 5
    excluded_dirs: List[str] = Field(default_factory=lambda: [".git", "node_modules", "__pycache__"])
    allowed_extensions: Optional[List[str]] = None  # None means all extensions allowed


class LanguageConfig(BaseModel):
    """Language-specific configuration."""

    auto_install: bool = False  # DEPRECATED: No longer used with tree-sitter-language-pack
    default_max_depth: int = 5  # Default depth for AST traversal
    preferred_languages: List[str] = Field(default_factory=list)


class ServerConfig(BaseModel):
    """Main server configuration."""

    cache: CacheConfig = Field(default_factory=CacheConfig)
    security: SecurityConfig = Field(default_factory=SecurityConfig)
    language: LanguageConfig = Field(default_factory=LanguageConfig)
    log_level: str = "INFO"
    max_results_default: int = 100

    @classmethod
    def from_file(cls, path: str) -> "ServerConfig":
        """Load configuration from YAML file."""
        logger = logging.getLogger(__name__)
        config_path = Path(path)
        if not config_path.exists():
            logger.warning(f"Config file does not exist: {path}")
            return cls()

        try:
            with open(config_path, "r") as f:
                file_content = f.read()
                logger.debug(f"YAML File content:\n{file_content}")
                config_data = yaml.safe_load(file_content)

            logger.debug(f"Loaded config data: {config_data}")

            if config_data is None:
                logger.warning(f"Config file is empty or contains only comments: {path}")
                return cls()

            # Create config from file
            config = cls(**config_data)

            # Apply environment variables on top of file config
            update_config_from_env(config)

            return config
        except Exception as e:
            logger.error(f"Error loading configuration from {path}: {e}")
            import traceback

            logger.debug(traceback.format_exc())
            return cls()

    @classmethod
    def from_env(cls) -> "ServerConfig":
        """Load configuration from environment variables."""
        config = cls()
        update_config_from_env(config)
        return config


def update_config_from_env(config: ServerConfig) -> None:
    """Update configuration from environment variables.

    This function applies all environment variables with the MCP_TS_ prefix
    to the provided config object, using the single underscore format only.

    Args:
        config: The ServerConfig object to update with environment variables
    """
    logger = logging.getLogger(__name__)
    env_prefix = "MCP_TS_"

    # Get all environment variables with our prefix
    env_vars = {k: v for k, v in os.environ.items() if k.startswith(env_prefix)}

    # Process the environment variables
    for env_name, env_value in env_vars.items():
        # Remove the prefix
        key = env_name[len(env_prefix) :]
        logger.debug(f"Processing environment variable: {env_name}, key after prefix removal: {key}")

        # Single underscore format only (MCP_TS_CACHE_MAX_SIZE_MB)
        # If the config has a section matching the first part, use it
        # Otherwise, it might be a top-level setting
        parts = key.lower().split("_")

        # Check if first part is a valid section
        if len(parts) > 1 and hasattr(config, parts[0]):
            section = parts[0]
            # All remaining parts form the setting name
            setting = "_".join(parts[1:])
            logger.debug(f"Single underscore format: section={section}, setting={setting}")
        else:
            # No section match found, treat as top-level setting
            section = None
            setting = key.lower()
            logger.debug(f"Top-level setting: {setting}")

        # Apply the setting to the configuration
        if section is None:
            # Top-level setting
            if hasattr(config, setting):
                orig_value = getattr(config, setting)
                new_value = _convert_value(env_value, orig_value)
                setattr(config, setting, new_value)
                logger.debug(f"Applied environment variable {env_name} to {setting}: {orig_value} -> {new_value}")
            else:
                logger.warning(f"Unknown top-level setting in environment variable {env_name}: {setting}")
        elif hasattr(config, section):
            # Section setting
            section_obj = getattr(config, section)
            if hasattr(section_obj, setting):
                # Convert the value to the appropriate type
                orig_value = getattr(section_obj, setting)
                new_value = _convert_value(env_value, orig_value)
                setattr(section_obj, setting, new_value)
                logger.debug(
                    f"Applied environment variable {env_name} to {section}.{setting}: {orig_value} -> {new_value}"
                )
            else:
                logger.warning(f"Unknown setting {setting} in section {section} from environment variable {env_name}")


def _convert_value(value_str: str, current_value: Any) -> Any:
    """Convert string value from environment variable to the appropriate type.

    Args:
        value_str: The string value from the environment variable
        current_value: The current value to determine the type

    Returns:
        The converted value with the appropriate type, or the original value if conversion fails
    """
    logger = logging.getLogger(__name__)

    # Handle different types
    try:
        if isinstance(current_value, bool):
            return value_str.lower() in ("true", "yes", "1", "y", "t", "on")
        elif isinstance(current_value, int):
            return int(value_str)
        elif isinstance(current_value, float):
            return float(value_str)
        elif isinstance(current_value, list):
            # Convert comma-separated string to list
            return [item.strip() for item in value_str.split(",")]
        else:
            # Default to string
            return value_str
    except (ValueError, TypeError) as e:
        # If conversion fails, log a warning and return the original value
        logger.warning(f"Failed to convert value '{value_str}' to type {type(current_value).__name__}: {e}")
        return current_value


class ConfigurationManager:
    """Manages server configuration without relying on global variables."""

    def __init__(self, initial_config: Optional[ServerConfig] = None):
        """Initialize with optional initial configuration."""
        self._config = initial_config or ServerConfig()
        self._logger = logging.getLogger(__name__)

        # Apply environment variables to the initial configuration
        # Log before state for debugging
        self._logger.debug(
            f"Before applying env vars in __init__: cache.max_size_mb = {self._config.cache.max_size_mb}, "
            f"security.max_file_size_mb = {self._config.security.max_file_size_mb}"
        )

        # Apply environment variables
        update_config_from_env(self._config)

        # Log after state for debugging
        self._logger.debug(
            f"After applying env vars in __init__: cache.max_size_mb = {self._config.cache.max_size_mb}, "
            f"security.max_file_size_mb = {self._config.security.max_file_size_mb}"
        )

    def get_config(self) -> ServerConfig:
        """Get the current configuration."""
        return self._config

    def load_from_file(self, path: Union[str, Path]) -> ServerConfig:
        """Load configuration from a YAML file."""
        self._logger.info(f"Loading configuration from file: {path}")
        config_path = Path(path)

        # Log more information for debugging
        self._logger.info(f"Absolute path: {config_path.absolute()}")
        self._logger.info(f"Path exists: {config_path.exists()}")

        if not config_path.exists():
            self._logger.error(f"Config file does not exist: {path}")
            return self._config

        try:
            with open(config_path, "r") as f:
                file_content = f.read()
                self._logger.info(f"YAML File content:\n{file_content}")
                # Check if file content is empty
                if not file_content.strip():
                    self._logger.error(f"Config file is empty: {path}")
                    return self._config

                # Try to parse YAML
                config_data = yaml.safe_load(file_content)
                self._logger.info(f"YAML parsing successful? {config_data is not None}")

            self._logger.info(f"Loaded config data: {config_data}")

            if config_data is None:
                self._logger.error(f"Config file is empty or contains only comments: {path}")
                return self._config

            # Debug output before update
            self._logger.info(
                f"Before update: cache.max_size_mb = {self._config.cache.max_size_mb}, "
                f"security.max_file_size_mb = {self._config.security.max_file_size_mb}"
            )

            # Better error handling for invalid YAML data
            if not isinstance(config_data, dict):
                self._logger.error(f"YAML data is not a dictionary: {type(config_data)}")
                return self._config

            # Log the YAML structure
            self._logger.info(f"YAML structure: {list(config_data.keys()) if config_data else 'None'}")

            # Create new config from file data
            try:
                new_config = ServerConfig(**config_data)

                # Debug output for new config
                self._logger.info(
                    f"New config: cache.max_size_mb = {new_config.cache.max_size_mb}, "
                    f"security.max_file_size_mb = {new_config.security.max_file_size_mb}"
                )
            except Exception as e:
                self._logger.error(f"Error creating ServerConfig from YAML data: {e}")
                return self._config

            # Instead of simply replacing the config object, use
            # update_config_from_new() to ensure all attributes are copied
            # correctly (mirroring how the load_config function works)
            update_config_from_new(self._config, new_config)

            # Debug output after update
            self._logger.info(
                f"After update: cache.max_size_mb = {self._config.cache.max_size_mb}, "
                f"security.max_file_size_mb = {self._config.security.max_file_size_mb}"
            )

            # Apply environment variables AFTER loading YAML
            # This ensures environment variables have highest precedence
            self._logger.info("Applying environment variables to override YAML settings")
            update_config_from_env(self._config)

            # Log after applying environment variables to show final state
            self._logger.info(
                f"After applying env vars: cache.max_size_mb = {self._config.cache.max_size_mb}, "
                f"security.max_file_size_mb = {self._config.security.max_file_size_mb}"
            )

            # Apply configuration to dependencies
            try:
                from .di import get_container

                container = get_container()

                # Update tree cache settings
                self._logger.info(
                    f"Setting tree cache: enabled={self._config.cache.enabled}, "
                    f"size={self._config.cache.max_size_mb}MB, ttl={self._config.cache.ttl_seconds}s"
                )
                container.tree_cache.set_enabled(self._config.cache.enabled)
                container.tree_cache.set_max_size_mb(self._config.cache.max_size_mb)
                container.tree_cache.set_ttl_seconds(self._config.cache.ttl_seconds)

                # Update logging configuration using centralized bootstrap module
                update_log_levels(self._config.log_level)
                self._logger.debug(f"Applied log level {self._config.log_level} to mcp_server_tree_sitter loggers")

                self._logger.info("Applied configuration to dependencies")
            except (ImportError, AttributeError) as e:
                self._logger.warning(f"Could not apply config to dependencies: {e}")

            self._logger.info(f"Successfully loaded configuration from {path}")

            return self._config

        except Exception as e:
            self._logger.error(f"Error loading configuration from {path}: {e}")
            import traceback

            self._logger.error(traceback.format_exc())
            return self._config

    def update_value(self, path: str, value: Any) -> None:
        """Update a specific configuration value by dot-notation path."""
        parts = path.split(".")

        # Store original value for logging
        old_value = None

        # Handle two levels deep for now (e.g., "cache.max_size_mb")
        if len(parts) == 2:
            section, key = parts

            if hasattr(self._config, section):
                section_obj = getattr(self._config, section)
                if hasattr(section_obj, key):
                    old_value = getattr(section_obj, key)
                    setattr(section_obj, key, value)
                    self._logger.debug(f"Updated config value {path} from {old_value} to {value}")
                else:
                    self._logger.warning(f"Unknown config key: {key} in section {section}")
            else:
                self._logger.warning(f"Unknown config section: {section}")
        else:
            # Handle top-level attributes
            if hasattr(self._config, path):
                old_value = getattr(self._config, path)
                setattr(self._config, path, value)
                self._logger.debug(f"Updated config value {path} from {old_value} to {value}")

                # If updating log_level, apply it using centralized bootstrap function
                if path == "log_level":
                    # Use centralized bootstrap module
                    update_log_levels(value)
                    self._logger.debug(f"Applied log level {value} to mcp_server_tree_sitter loggers")
            else:
                self._logger.warning(f"Unknown config path: {path}")

        # After direct updates, ensure environment variables still have precedence
        # by reapplying them - this ensures consistency in the precedence model
        # Environment variables > Explicit updates > YAML > Defaults
        update_config_from_env(self._config)

    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to a dictionary."""
        return {
            "cache": {
                "enabled": self._config.cache.enabled,
                "max_size_mb": self._config.cache.max_size_mb,
                "ttl_seconds": self._config.cache.ttl_seconds,
            },
            "security": {
                "max_file_size_mb": self._config.security.max_file_size_mb,
                "excluded_dirs": self._config.security.excluded_dirs,
            },
            "language": {
                "auto_install": self._config.language.auto_install,
                "default_max_depth": self._config.language.default_max_depth,
            },
            "log_level": self._config.log_level,
        }


# We've removed the global CONFIG instance to eliminate global state and
# potential concurrency issues. All code should now use either:
# 1. The context's config_manager.get_config() method
# 2. A locally instantiated ServerConfig object
# 3. Configuration passed as function parameters


def get_default_config_path() -> Optional[Path]:
    """Get the default configuration file path based on the platform."""
    import platform

    if platform.system() == "Windows":
        config_dir = Path(os.environ.get("USERPROFILE", "")) / ".config" / "tree-sitter"
    else:
        config_dir = Path(os.environ.get("HOME", "")) / ".config" / "tree-sitter"

    config_path = config_dir / "config.yaml"

    if config_path.exists():
        return config_path

    return None


def update_config_from_new(original: ServerConfig, new: ServerConfig) -> None:
    """Update the original config with values from the new config."""
    logger = logging.getLogger(__name__)

    # Log before values
    logger.info(
        f"[update_config_from_new] Before: cache.max_size_mb={original.cache.max_size_mb}, "
        f"security.max_file_size_mb={original.security.max_file_size_mb}"
    )
    logger.info(
        f"[update_config_from_new] New values: cache.max_size_mb={new.cache.max_size_mb}, "
        f"security.max_file_size_mb={new.security.max_file_size_mb}"
    )

    # Update all attributes, copying collections to avoid reference issues
    try:
        # Cache settings
        original.cache.enabled = new.cache.enabled
        original.cache.max_size_mb = new.cache.max_size_mb
        original.cache.ttl_seconds = new.cache.ttl_seconds

        # Security settings
        original.security.max_file_size_mb = new.security.max_file_size_mb
        original.security.excluded_dirs = new.security.excluded_dirs.copy()
        if new.security.allowed_extensions:
            original.security.allowed_extensions = new.security.allowed_extensions.copy()
        else:
            original.security.allowed_extensions = None

        # Language settings
        original.language.auto_install = new.language.auto_install
        original.language.default_max_depth = new.language.default_max_depth
        original.language.preferred_languages = new.language.preferred_languages.copy()

        # Other settings
        original.log_level = new.log_level
        original.max_results_default = new.max_results_default

        # Log after values to confirm update succeeded
        logger.info(
            f"[update_config_from_new] After: cache.max_size_mb={original.cache.max_size_mb}, "
            f"security.max_file_size_mb={original.security.max_file_size_mb}"
        )
    except Exception as e:
        logger.error(f"Error updating config: {e}")
        # Ensure at least some values get updated
        try:
            original.cache.max_size_mb = new.cache.max_size_mb
            original.security.max_file_size_mb = new.security.max_file_size_mb
            original.language.default_max_depth = new.language.default_max_depth
            logger.info("Fallback update succeeded with basic values")
        except Exception as e2:
            logger.error(f"Fallback update also failed: {e2}")


def load_config(config_path: Optional[str] = None) -> ServerConfig:
    """Load and initialize configuration.

    Args:
        config_path: Path to YAML config file

    Returns:
        ServerConfig: The loaded configuration
    """
    logger = logging.getLogger(__name__)
    logger.info(f"load_config called with config_path={config_path}")

    # Create a new config instance
    config = ServerConfig()

    # Determine which config path to use
    path_to_load = None

    if config_path:
        # Use explicitly provided path
        path_to_load = Path(config_path)
    elif os.environ.get("MCP_TS_CONFIG_PATH"):
        # Use path from environment variable
        config_path_env = os.environ.get("MCP_TS_CONFIG_PATH")
        if config_path_env is not None:
            path_to_load = Path(config_path_env)
    else:
        # Try to use default config path
        default_path = get_default_config_path()
        if default_path:
            path_to_load = default_path
            logger.info(f"Using default configuration from {path_to_load}")

    # Load configuration from the determined path
    if path_to_load and path_to_load.exists():
        try:
            logger.info(f"Loading configuration from file: {path_to_load}")

            with open(path_to_load, "r") as f:
                content = f.read()
                logger.debug(f"File content:\n{content}")
                if not content.strip():
                    logger.warning("Config file is empty")
                    # Continue to apply environment variables below
                else:
                    # Load new configuration
                    logger.info(f"Loading configuration from {str(path_to_load)}")
                    new_config = ServerConfig.from_file(str(path_to_load))

                    # Debug output before update
                    logger.info(
                        f"New configuration loaded: cache.max_size_mb = {new_config.cache.max_size_mb}, "
                        f"security.max_file_size_mb = {new_config.security.max_file_size_mb}"
                    )

                    # Update the config by copying all attributes
                    update_config_from_new(config, new_config)

                    # Debug output after update
                    logger.info(f"Successfully loaded configuration from {path_to_load}")
                    logger.debug(
                        f"Updated config: cache.max_size_mb = {config.cache.max_size_mb}, "
                        f"security.max_file_size_mb = {config.security.max_file_size_mb}"
                    )

        except Exception as e:
            logger.error(f"Error loading configuration from {path_to_load}: {e}")
            import traceback

            logger.debug(traceback.format_exc())

    # Apply environment variables to configuration
    # This ensures that environment variables have the highest precedence
    # regardless of whether a config file was found
    update_config_from_env(config)

    logger.info(
        f"Final configuration: cache.max_size_mb = {config.cache.max_size_mb}, "
        f"security.max_file_size_mb = {config.security.max_file_size_mb}"
    )

    return config

```
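
A minimal sketch of ConfigurationManager's precedence behavior (illustrative
values; any matching MCP_TS_* environment variable would still win):

```python
manager = ConfigurationManager()
manager.update_value("cache.max_size_mb", 256)
manager.update_value("log_level", "DEBUG")

snapshot = manager.to_dict()
print(snapshot["cache"]["max_size_mb"])  # 256, unless MCP_TS_CACHE_MAX_SIZE_MB overrides
print(snapshot["log_level"])             # "DEBUG", unless MCP_TS_LOG_LEVEL overrides
```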

--------------------------------------------------------------------------------
/tests/test_symbol_extraction.py:
--------------------------------------------------------------------------------

```python
"""
Tests for symbol extraction and dependency analysis issues.

This module contains tests specifically focused on the symbol extraction and
dependency analysis issues identified in FEATURES.md.
"""

import json
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, Generator

import pytest

from tests.test_helpers import (
    get_ast,
    get_dependencies,
    get_symbols,
    register_project_tool,
)


@pytest.fixture
def test_project(request) -> Generator[Dict[str, Any], None, None]:
    """Create a test project with Python files containing known symbols and imports."""
    with tempfile.TemporaryDirectory() as temp_dir:
        project_path = Path(temp_dir)

        # Create a Python file with known symbols and dependencies
        test_file = project_path / "test.py"
        with open(test_file, "w") as f:
            f.write(
                """
import os
import sys
from typing import List, Dict, Optional
from datetime import datetime as dt

class Person:
    def __init__(self, name: str, age: int):
        self.name = name
        self.age = age

    def greet(self) -> str:
        return f"Hello, my name is {self.name} and I'm {self.age} years old."

class Employee(Person):
    def __init__(self, name: str, age: int, employee_id: str):
        super().__init__(name, age)
        self.employee_id = employee_id

    def greet(self) -> str:
        basic_greeting = super().greet()
        return f"{basic_greeting} I am employee {self.employee_id}."

def process_data(items: List[str]) -> Dict[str, int]:
    result = {}
    for item in items:
        result[item] = len(item)
    return result

def calculate_age(birthdate: dt) -> int:
    today = dt.now()
    age = today.year - birthdate.year
    if (today.month, today.day) < (birthdate.month, birthdate.day):
        age -= 1
    return age

if __name__ == "__main__":
    p = Person("Alice", 30)
    e = Employee("Bob", 25, "E12345")

    print(p.greet())
    print(e.greet())

    data = process_data(["apple", "banana", "cherry"])
    print(data)

    bob_birthday = dt(1998, 5, 15)
    bob_age = calculate_age(bob_birthday)
    print(f"Bob's age is {bob_age}")
"""
            )

        # Create a second file with additional imports and symbols
        utils_file = project_path / "utils.py"
        with open(utils_file, "w") as f:
            f.write(
                """
import json
import csv
import random
from typing import Any, List, Dict, Tuple
from pathlib import Path

def save_json(data: Dict[str, Any], filename: str) -> None:
    with open(filename, 'w') as f:
        json.dump(data, f, indent=2)

def load_json(filename: str) -> Dict[str, Any]:
    with open(filename, 'r') as f:
        return json.load(f)

def generate_random_data(count: int) -> List[Dict[str, Any]]:
    result = []
    for i in range(count):
        person = {
            "id": i,
            "name": f"Person {i}",
            "age": random.randint(18, 80),
            "active": random.choice([True, False])
        }
        result.append(person)
    return result

class FileHandler:
    def __init__(self, base_path: str):
        self.base_path = Path(base_path)

    def save_data(self, data: Dict[str, Any], filename: str) -> str:
        file_path = self.base_path / filename
        save_json(data, str(file_path))
        return str(file_path)

    def load_data(self, filename: str) -> Dict[str, Any]:
        file_path = self.base_path / filename
        return load_json(str(file_path))
"""
            )

        # Generate a unique project name based on the test name
        test_name = request.node.name
        unique_id = abs(hash(test_name)) % 10000
        project_name = f"symbol_test_project_{unique_id}"

        # Register project
        try:
            register_project_tool(path=str(project_path), name=project_name)
        except Exception:
            # If registration fails, try with an even more unique name
            import time

            project_name = f"symbol_test_project_{unique_id}_{int(time.time())}"
            register_project_tool(path=str(project_path), name=project_name)

        yield {
            "name": project_name,
            "path": str(project_path),
            "files": ["test.py", "utils.py"],
        }


def test_symbol_extraction_diagnostics(test_project) -> None:
    """Test symbol extraction to diagnose specific issues in the implementation."""
    # Get symbols from first file, excluding class methods
    symbols = get_symbols(project=test_project["name"], file_path="test.py")

    # Also get symbols with class methods excluded for comparison
    from mcp_server_tree_sitter.api import get_language_registry, get_project_registry
    from mcp_server_tree_sitter.tools.analysis import extract_symbols

    project = get_project_registry().get_project(test_project["name"])
    language_registry = get_language_registry()
    symbols_excluding_methods = extract_symbols(project, "test.py", language_registry, exclude_class_methods=True)

    # Verify the result structure
    assert "functions" in symbols, "Result should contain 'functions' key"
    assert "classes" in symbols, "Result should contain 'classes' key"
    assert "imports" in symbols, "Result should contain 'imports' key"

    # Print diagnostic information
    print("\nSymbol extraction results for test.py:")
    print(f"Functions: {symbols['functions']}")
    print(f"Functions (excluding methods): {symbols_excluding_methods['functions']}")
    print(f"Classes: {symbols['classes']}")
    print(f"Imports: {symbols['imports']}")

    # Check symbol counts
    expected_function_count = 2  # process_data, calculate_age
    expected_class_count = 2  # Person, Employee
    expected_import_count = 4  # os, sys, typing, datetime

    # Verify extracted symbols
    if symbols_excluding_methods["functions"] and len(symbols_excluding_methods["functions"]) > 0:
        # Instead of checking exact counts, just verify we found the main functions
        function_names = [f["name"] for f in symbols_excluding_methods["functions"]]

        # Check for process_data function - handle both bytes and strings
        process_data_found = False
        for name in function_names:
            if (isinstance(name, bytes) and b"process_data" in name) or (
                isinstance(name, str) and "process_data" in name
            ):
                process_data_found = True
                break

        # Check for calculate_age function - handle both bytes and strings
        calculate_age_found = False
        for name in function_names:
            if (isinstance(name, bytes) and b"calculate_age" in name) or (
                isinstance(name, str) and "calculate_age" in name
            ):
                calculate_age_found = True
                break

        assert process_data_found, "Expected to find 'process_data' function"
        assert calculate_age_found, "Expected to find 'calculate_age' function"
    else:
        print(f"KNOWN ISSUE: Expected {expected_function_count} functions, but got empty list")

    if symbols["classes"] and len(symbols["classes"]) > 0:
        assert len(symbols["classes"]) == expected_class_count
    else:
        print(f"KNOWN ISSUE: Expected {expected_class_count} classes, but got empty list")

    if symbols["imports"] and len(symbols["imports"]) > 0:
        # Our improved import detection now finds individual import names plus the statements
        # So we'll just check that we found all expected import modules
        import_texts = [imp.get("name", "") for imp in symbols["imports"]]
        for module in ["os", "sys", "typing", "datetime"]:
            assert any(
                (isinstance(text, bytes) and module.encode() in text) or (isinstance(text, str) and module in text)
                for text in import_texts
            ), f"Should find '{module}' import"
    else:
        print(f"KNOWN ISSUE: Expected {expected_import_count} imports, but got empty list")

    # Now check the second file to ensure results are consistent
    symbols_utils = get_symbols(project=test_project["name"], file_path="utils.py")

    print("\nSymbol extraction results for utils.py:")
    print(f"Functions: {symbols_utils['functions']}")
    print(f"Classes: {symbols_utils['classes']}")
    print(f"Imports: {symbols_utils['imports']}")


def test_dependency_analysis_diagnostics(test_project) -> None:
    """Test dependency analysis to diagnose specific issues in the implementation."""
    # Get dependencies from the first file
    dependencies = get_dependencies(project=test_project["name"], file_path="test.py")

    # Print diagnostic information
    print("\nDependency analysis results for test.py:")
    print(f"Dependencies: {dependencies}")

    # Expected dependencies based on imports
    expected_dependencies = ["os", "sys", "typing", "datetime"]

    # Check dependencies that should be found
    if dependencies and len(dependencies) > 0:
        # If we have a module list, check against that directly
        if "module" in dependencies:
            # Modify test to be more flexible with datetime imports
            for dep in ["os", "sys", "typing"]:
                assert any(
                    (isinstance(mod, bytes) and dep.encode() in mod) or (isinstance(mod, str) and dep in mod)
                    for mod in dependencies["module"]
                ), f"Expected dependency '{dep}' not found"
        else:
            # Otherwise check in the entire dependencies dictionary
            for dep in expected_dependencies:
                assert dep in str(dependencies), f"Expected dependency '{dep}' not found"
    else:
        print(f"KNOWN ISSUE: Expected dependencies {expected_dependencies}, but got empty result")

    # Check the second file for consistency
    dependencies_utils = get_dependencies(project=test_project["name"], file_path="utils.py")

    print("\nDependency analysis results for utils.py:")
    print(f"Dependencies: {dependencies_utils}")


def test_symbol_extraction_with_ast_access(test_project) -> None:
    """Test symbol extraction with direct AST access to identify where processing breaks."""
    # Get the AST for the file
    ast_result = get_ast(
        project=test_project["name"],
        path="test.py",
        max_depth=10,  # Deep enough to capture all relevant nodes
        include_text=True,
    )

    # Verify the AST is properly formed
    assert "tree" in ast_result, "AST result should contain 'tree'"

    # Extract the tree structure for analysis
    tree = ast_result["tree"]

    # Manually search for symbols in the AST
    functions = []
    classes = []
    imports = []

    def node_text_str(node) -> str:
        """Return a node's text as a str, decoding bytes when necessary."""
        text = node.get("text", "")
        return text.decode("utf-8", errors="replace") if isinstance(text, bytes) else text

    def extract_symbols_manually(node, path=()) -> None:
        """Recursively extract symbols from the AST."""
        if not isinstance(node, dict):
            return

        node_type = node.get("type")

        # Identify function definitions
        if node_type == "function_definition":
            # Find the name node which is usually a direct child with type 'identifier'
            if "children" in node:
                for child in node["children"]:
                    if child.get("type") == "identifier":
                        functions.append(
                            {
                                "name": child.get("text"),
                                "path": path,
                                "node_id": node.get("id"),
                                "text": node.get("text", "").split("\n")[0][:50],  # First line, truncated
                            }
                        )
                        break

        # Identify class definitions
        elif node_type == "class_definition":
            # Find the name node
            if "children" in node:
                for child in node["children"]:
                    if child.get("type") == "identifier":
                        classes.append(
                            {
                                "name": child.get("text"),
                                "path": path,
                                "node_id": node.get("id"),
                                "text": node.get("text", "").split("\n")[0][:50],  # First line, truncated
                            }
                        )
                        break

        # Identify imports
        elif node_type in ("import_statement", "import_from_statement"):
            imports.append(
                {
                    "type": node_type,
                    "path": path,
                    "node_id": node.get("id"),
                    "text": node.get("text", "").split("\n")[0],  # First line
                }
            )

        # Recurse into children
        if "children" in node:
            for i, child in enumerate(node["children"]):
                extract_symbols_manually(child, path + (i,))

    # Extract symbols from the AST
    extract_symbols_manually(tree)

    # Print diagnostic information
    print("\nManual symbol extraction results:")
    print(f"Functions found: {len(functions)}")
    for func in functions:
        print(f"  {func['name']} - {func['text']}")

    print(f"Classes found: {len(classes)}")
    for cls in classes:
        print(f"  {cls['name']} - {cls['text']}")

    print(f"Imports found: {len(imports)}")
    for imp in imports:
        print(f"  {imp['type']} - {imp['text']}")

    # Expected counts
    assert len(functions) > 0, "Should find at least one function by manual extraction"
    assert len(classes) > 0, "Should find at least one class by manual extraction"
    assert len(imports) > 0, "Should find at least one import by manual extraction"

    # Compare with get_symbols results
    symbols = get_symbols(project=test_project["name"], file_path="test.py")

    print("\nComparison with get_symbols:")
    print(f"Manual functions: {len(functions)}, get_symbols: {len(symbols['functions'])}")
    print(f"Manual classes: {len(classes)}, get_symbols: {len(symbols['classes'])}")
    print(f"Manual imports: {len(imports)}, get_symbols: {len(symbols['imports'])}")


def test_query_based_symbol_extraction(test_project) -> None:
    """
    Test symbol extraction using direct tree-sitter queries to identify issues.

    This test demonstrates how query-based symbol extraction should work,
    which can help identify where the implementation breaks down.
    """
    try:
        # Import necessary components for direct query execution
        from tree_sitter import Parser, Query
        from tree_sitter_language_pack import get_language

        # Get Python language
        language_obj = get_language("python")

        # Create a parser
        parser = Parser()
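        # Note: py-tree-sitter's Parser API changed across releases; older
        # versions use set_language(), newer ones expose only the `language`
        # property, hence the two-step fallback below.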
        try:
            # Try set_language method first
            parser.set_language(language_obj)  # type: ignore
        except (AttributeError, TypeError):
            # Fall back to setting language property
            parser.language = language_obj

        # Read the file content
        file_path = os.path.join(test_project["path"], "test.py")
        with open(file_path, "rb") as f:
            content = f.read()

        # Parse the content
        tree = parser.parse(content)

        # Define queries for different symbol types
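        # (In these patterns, labels such as `name:` and `body:` are field
        # names from the tree-sitter Python grammar, and `@`-prefixed names
        # are capture labels used to retrieve the matching nodes.)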
        function_query = """
            (function_definition
                name: (identifier) @function.name
                parameters: (parameters) @function.params
                body: (block) @function.body
            ) @function.def
        """

        class_query = """
            (class_definition
                name: (identifier) @class.name
                body: (block) @class.body
            ) @class.def
        """

        import_query = """
            (import_statement
                name: (dotted_name) @import.module
            ) @import

            (import_from_statement
                module_name: (dotted_name) @import.from
                name: (dotted_name) @import.item
            ) @import
        """

        # Run the queries
        functions_q = Query(language_obj, function_query)
        classes_q = Query(language_obj, class_query)
        imports_q = Query(language_obj, import_query)

        function_captures = functions_q.captures(tree.root_node)
        class_captures = classes_q.captures(tree.root_node)
        import_captures = imports_q.captures(tree.root_node)
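
        # Note: the return shape of Query.captures() varies across
        # py-tree-sitter versions. Some return {capture_name: [nodes]},
        # others a list of (node, capture_name) tuples, so the helpers
        # below accept both.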

        # Process and extract unique symbols
        functions: Dict[str, Dict[str, Any]] = {}
        classes: Dict[str, Dict[str, Any]] = {}
        imports: Dict[str, Dict[str, Any]] = {}

        # Helper function to process captures with different formats
        def process_capture(captures, target_type, result_dict) -> None:
            # Check if it's returning a dictionary format
            if isinstance(captures, dict):
                # Dictionary format: {capture_name: [node1, node2, ...], ...}
                for capture_name, nodes in captures.items():
                    if capture_name == target_type:
                        for node in nodes:
                            name = node.text.decode("utf-8") if hasattr(node.text, "decode") else str(node.text)
                            result_dict[name] = {
                                "name": name,
                                "start": node.start_point,
                                "end": node.end_point,
                            }
            else:
                # Assume it's a list of matches
                try:
                    # Try different formats
                    for item in captures:
                        # Could be tuple, object, or dict
                        if isinstance(item, tuple):
                            if len(item) == 2:
                                node, capture_name = item
                            else:
                                continue  # Skip if unexpected tuple size
                        elif hasattr(item, "node") and hasattr(item, "capture_name"):
                            node, capture_name = item.node, item.capture_name
                        elif isinstance(item, dict) and "node" in item and "capture" in item:
                            node, capture_name = item["node"], item["capture"]
                        else:
                            continue  # Skip if format unknown

                        if capture_name == target_type:
                            name = node.text.decode("utf-8") if hasattr(node.text, "decode") else str(node.text)
                            result_dict[name] = {
                                "name": name,
                                "start": node.start_point,
                                "end": node.end_point,
                            }
                except Exception as e:
                    print(f"Error processing captures: {str(e)}")

        # Process each type of capture
        process_capture(function_captures, "function.name", functions)
        process_capture(class_captures, "class.name", classes)

        # For imports, use a separate function since the comparison is different
        def process_import_capture(captures) -> None:
            # Check if it's returning a dictionary format
            if isinstance(captures, dict):
                # Dictionary format: {capture_name: [node1, node2, ...], ...}
                for capture_name, nodes in captures.items():
                    if capture_name in ("import.module", "import.from", "import.item"):
                        for node in nodes:
                            name = node.text.decode("utf-8") if hasattr(node.text, "decode") else str(node.text)
                            imports[name] = {
                                "name": name,
                                "type": capture_name,
                                "start": node.start_point,
                                "end": node.end_point,
                            }
            else:
                # Assume it's a list of matches
                try:
                    # Try different formats
                    for item in captures:
                        # Could be tuple, object, or dict
                        if isinstance(item, tuple):
                            if len(item) == 2:
                                node, capture_name = item
                            else:
                                continue  # Skip if unexpected tuple size
                        elif hasattr(item, "node") and hasattr(item, "capture_name"):
                            node, capture_name = item.node, item.capture_name
                        elif isinstance(item, dict) and "node" in item and "capture" in item:
                            node, capture_name = item["node"], item["capture"]
                        else:
                            continue  # Skip if format unknown

                        if capture_name in (
                            "import.module",
                            "import.from",
                            "import.item",
                        ):
                            name = node.text.decode("utf-8") if hasattr(node.text, "decode") else str(node.text)
                            imports[name] = {
                                "name": name,
                                "type": capture_name,
                                "start": node.start_point,
                                "end": node.end_point,
                            }
                except Exception as e:
                    print(f"Error processing import captures: {str(e)}")

        # Call the import capture processing function
        process_import_capture(import_captures)

        # Print the direct query results
        print("\nDirect query results:")
        print(f"Functions: {list(functions.keys())}")
        print(f"Classes: {list(classes.keys())}")
        print(f"Imports: {list(imports.keys())}")

        # Compare with get_symbols
        symbols = get_symbols(project=test_project["name"], file_path="test.py")

        print("\nComparison with get_symbols:")
        print(f"Query functions: {len(functions)}, get_symbols: {len(symbols['functions'])}")
        print(f"Query classes: {len(classes)}, get_symbols: {len(symbols['classes'])}")
        print(f"Query imports: {len(imports)}, get_symbols: {len(symbols['imports'])}")

        # Document any differences that might indicate where the issue lies
        if len(functions) != len(symbols["functions"]):
            print("ISSUE: Function count mismatch")

        if len(classes) != len(symbols["classes"]):
            print("ISSUE: Class count mismatch")

        if len(imports) != len(symbols["imports"]):
            print("ISSUE: Import count mismatch")

    except Exception as e:
        print(f"Error in direct query execution: {str(e)}")
        pytest.fail(f"Direct query execution failed: {str(e)}")


def test_debug_file_saving(test_project) -> None:
    """Save debug information to files for further analysis."""
    # Create a debug directory
    debug_dir = os.path.join(test_project["path"], "debug")
    os.makedirs(debug_dir, exist_ok=True)

    # Get AST and symbol information
    ast_result = get_ast(project=test_project["name"], path="test.py", max_depth=10, include_text=True)

    symbols = get_symbols(project=test_project["name"], file_path="test.py")

    dependencies = get_dependencies(project=test_project["name"], file_path="test.py")

    # Define a custom JSON encoder for bytes objects
    class BytesEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, bytes):
                return obj.decode("utf-8", errors="replace")
            return super().default(obj)

    # Save the information to files
    with open(os.path.join(debug_dir, "ast.json"), "w") as f:
        json.dump(ast_result, f, indent=2, cls=BytesEncoder)

    with open(os.path.join(debug_dir, "symbols.json"), "w") as f:
        json.dump(symbols, f, indent=2, cls=BytesEncoder)

    with open(os.path.join(debug_dir, "dependencies.json"), "w") as f:
        json.dump(dependencies, f, indent=2, cls=BytesEncoder)

    print(f"\nDebug information saved to {debug_dir}")

```
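
These diagnostics are ordinary pytest tests that communicate through `print()` output. A minimal sketch for running only them with that output kept visible (the `-k` keyword expression is an assumption based on the test names above):

```python
# Hypothetical runner: select the diagnostic tests by keyword and pass -s so
# their printed comparisons reach the console.
import pytest

pytest.main(["-s", "-k", "symbol_extraction or dependency_analysis or query_based"])
```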

--------------------------------------------------------------------------------
/src/mcp_server_tree_sitter/tools/registration.py:
--------------------------------------------------------------------------------

```python
"""Tool registration with dependency injection for MCP server.

This module centralizes all tool registrations with proper dependency injection,
removing the need for global variables or singletons.
"""

import logging
import os
from typing import Any, Dict, List, Optional

from ..di import DependencyContainer
from ..exceptions import ProjectError

logger = logging.getLogger(__name__)


def register_tools(mcp_server: Any, container: DependencyContainer) -> None:
    """Register all MCP tools with dependency injection.

    Args:
        mcp_server: MCP server instance
        container: Dependency container
    """
    # Access dependencies
    config_manager = container.config_manager
    tree_cache = container.tree_cache
    project_registry = container.project_registry
    language_registry = container.language_registry
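
    # Each tool below is defined as a closure so it captures the injected
    # dependencies above, avoiding module-level state.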

    # Configuration Tool
    @mcp_server.tool()
    def configure(
        config_path: Optional[str] = None,
        cache_enabled: Optional[bool] = None,
        max_file_size_mb: Optional[int] = None,
        log_level: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Configure the server.

        Args:
            config_path: Path to YAML config file
            cache_enabled: Whether to enable parse tree caching
            max_file_size_mb: Maximum file size in MB
            log_level: Logging level (DEBUG, INFO, WARNING, ERROR)

        Returns:
            Current configuration
        """
        # Get initial config for comparison
        initial_config = config_manager.get_config()
        logger.info(
            f"Initial configuration: "
            f"cache.max_size_mb = {initial_config.cache.max_size_mb}, "
            f"security.max_file_size_mb = {initial_config.security.max_file_size_mb}, "
            f"language.default_max_depth = {initial_config.language.default_max_depth}"
        )

        # Load config if path provided
        if config_path:
            logger.info(f"Configuring server with YAML config from: {config_path}")
            # Log absolute path to ensure we're looking at the right file
            abs_path = os.path.abspath(config_path)
            logger.info(f"Absolute path: {abs_path}")

            # Check if the file exists before trying to load it
            if not os.path.exists(abs_path):
                logger.error(f"Config file does not exist: {abs_path}")

            config_manager.load_from_file(abs_path)

        # Update specific settings
        if cache_enabled is not None:
            logger.info(f"Setting cache.enabled to {cache_enabled}")
            config_manager.update_value("cache.enabled", cache_enabled)
            tree_cache.set_enabled(cache_enabled)

        if max_file_size_mb is not None:
            logger.info(f"Setting security.max_file_size_mb to {max_file_size_mb}")
            config_manager.update_value("security.max_file_size_mb", max_file_size_mb)

        if log_level is not None:
            logger.info(f"Setting log_level to {log_level}")
            config_manager.update_value("log_level", log_level)

        # Return current config as dict
        return config_manager.to_dict()

    # Project Management Tools
    @mcp_server.tool()
    def register_project_tool(
        path: str, name: Optional[str] = None, description: Optional[str] = None
    ) -> Dict[str, Any]:
        """Register a project directory for code exploration.

        Args:
            path: Path to the project directory
            name: Optional name for the project (defaults to directory name)
            description: Optional description of the project

        Returns:
            Project information
        """
        try:
            # Register project
            project = project_registry.register_project(name or path, path, description)

            # Scan for languages
            project.scan_files(language_registry)

            return project.to_dict()
        except Exception as e:
            raise ProjectError(f"Failed to register project: {e}") from e

    @mcp_server.tool()
    def list_projects_tool() -> List[Dict[str, Any]]:
        """List all registered projects.

        Returns:
            List of project information
        """
        return project_registry.list_projects()

    @mcp_server.tool()
    def remove_project_tool(name: str) -> Dict[str, str]:
        """Remove a registered project.

        Args:
            name: Project name

        Returns:
            Success message
        """
        try:
            project_registry.remove_project(name)
            return {"status": "success", "message": f"Project '{name}' removed"}
        except Exception as e:
            raise ProjectError(f"Failed to remove project: {e}") from e

    # Language Tools
    @mcp_server.tool()
    def list_languages() -> Dict[str, Any]:
        """List available languages.

        Returns:
            Information about available languages
        """
        available = language_registry.list_available_languages()

        return {
            "available": available,
            "installable": [],  # No separate installation needed with language-pack
        }

    @mcp_server.tool()
    def check_language_available(language: str) -> Dict[str, str]:
        """Check if a tree-sitter language parser is available.

        Args:
            language: Language to check

        Returns:
            Success message
        """
        if language_registry.is_language_available(language):
            return {
                "status": "success",
                "message": f"Language '{language}' is available via tree-sitter-language-pack",
            }
        else:
            return {
                "status": "error",
                "message": f"Language '{language}' is not available",
            }

    # File Operations Tools
    @mcp_server.tool()
    def list_files(
        project: str,
        pattern: Optional[str] = None,
        max_depth: Optional[int] = None,
        extensions: Optional[List[str]] = None,
    ) -> List[str]:
        """List files in a project.

        Args:
            project: Project name
            pattern: Optional glob pattern (e.g., "**/*.py")
            max_depth: Maximum directory depth
            extensions: List of file extensions to include (without dot)

        Returns:
            List of file paths
        """
        from ..tools.file_operations import list_project_files

        return list_project_files(project_registry.get_project(project), pattern, max_depth, extensions)

    @mcp_server.tool()
    def get_file(project: str, path: str, max_lines: Optional[int] = None, start_line: int = 0) -> str:
        """Get content of a file.

        Args:
            project: Project name
            path: File path relative to project root
            max_lines: Maximum number of lines to return
            start_line: First line to include (0-based)

        Returns:
            File content
        """
        from ..tools.file_operations import get_file_content

        return get_file_content(project_registry.get_project(project), path, max_lines=max_lines, start_line=start_line)

    @mcp_server.tool()
    def get_file_metadata(project: str, path: str) -> Dict[str, Any]:
        """Get metadata for a file.

        Args:
            project: Project name
            path: File path relative to project root

        Returns:
            File metadata
        """
        from ..tools.file_operations import get_file_info

        return get_file_info(project_registry.get_project(project), path)

    # AST Analysis Tools
    @mcp_server.tool()
    def get_ast(project: str, path: str, max_depth: Optional[int] = None, include_text: bool = True) -> Dict[str, Any]:
        """Get abstract syntax tree for a file.

        Args:
            project: Project name
            path: File path relative to project root
            max_depth: Maximum depth of the tree (default: 5)
            include_text: Whether to include node text

        Returns:
            AST as a nested dictionary
        """
        from ..tools.ast_operations import get_file_ast

        config = config_manager.get_config()
        depth = max_depth if max_depth is not None else config.language.default_max_depth

        return get_file_ast(
            project_registry.get_project(project),
            path,
            language_registry,
            tree_cache,
            max_depth=depth,
            include_text=include_text,
        )

    @mcp_server.tool()
    def get_node_at_position(project: str, path: str, row: int, column: int) -> Optional[Dict[str, Any]]:
        """Find the AST node at a specific position.

        Args:
            project: Project name
            path: File path relative to project root
            row: Line number (0-based)
            column: Column number (0-based)

        Returns:
            Node information or None if not found
        """
        from ..models.ast import node_to_dict
        from ..tools.ast_operations import find_node_at_position

        project_obj = project_registry.get_project(project)
        file_path = project_obj.get_file_path(path)

        language = language_registry.language_for_file(path)
        if not language:
            raise ValueError(f"Could not detect language for {path}")

        from ..tools.ast_operations import parse_file as parse_file_helper

        tree, source_bytes = parse_file_helper(file_path, language, language_registry, tree_cache)

        node = find_node_at_position(tree.root_node, row, column)
        if node:
            return node_to_dict(node, source_bytes, max_depth=2)

        return None

    # Search and Query Tools
    @mcp_server.tool()
    def find_text(
        project: str,
        pattern: str,
        file_pattern: Optional[str] = None,
        max_results: int = 100,
        case_sensitive: bool = False,
        whole_word: bool = False,
        use_regex: bool = False,
        context_lines: int = 2,
    ) -> List[Dict[str, Any]]:
        """Search for text pattern in project files.

        Args:
            project: Project name
            pattern: Text pattern to search for
            file_pattern: Optional glob pattern (e.g., "**/*.py")
            max_results: Maximum number of results
            case_sensitive: Whether to do case-sensitive matching
            whole_word: Whether to match whole words only
            use_regex: Whether to treat pattern as a regular expression
            context_lines: Number of context lines to include

        Returns:
            List of matches with file, line number, and text
        """
        from ..tools.search import search_text

        config = config_manager.get_config()

        return search_text(
            project_registry.get_project(project),
            pattern,
            file_pattern,
            max_results if max_results is not None else config.max_results_default,
            case_sensitive,
            whole_word,
            use_regex,
            context_lines,
        )

    @mcp_server.tool()
    def run_query(
        project: str,
        query: str,
        file_path: Optional[str] = None,
        language: Optional[str] = None,
        max_results: int = 100,
    ) -> List[Dict[str, Any]]:
        """Run a tree-sitter query on project files.

        Args:
            project: Project name
            query: Tree-sitter query string
            file_path: Optional specific file to query
            language: Language to use (required if file_path not provided)
            max_results: Maximum number of results

        Returns:
            List of query matches
        """
        from ..tools.search import query_code

        config = config_manager.get_config()

        return query_code(
            project_registry.get_project(project),
            query,
            language_registry,
            tree_cache,
            file_path,
            language,
            max_results if max_results is not None else config.max_results_default,
        )

    @mcp_server.tool()
    def get_query_template_tool(language: str, template_name: str) -> Dict[str, Any]:
        """Get a predefined tree-sitter query template.

        Args:
            language: Language name
            template_name: Template name (e.g., "functions", "classes")

        Returns:
            Query template information
        """
        from ..language.query_templates import get_query_template

        template = get_query_template(language, template_name)
        if not template:
            raise ValueError(f"No template '{template_name}' for language '{language}'")

        return {
            "language": language,
            "name": template_name,
            "query": template,
        }

    @mcp_server.tool()
    def list_query_templates_tool(language: Optional[str] = None) -> Dict[str, Any]:
        """List available query templates.

        Args:
            language: Optional language to filter by

        Returns:
            Available templates
        """
        from ..language.query_templates import list_query_templates

        return list_query_templates(language)

    @mcp_server.tool()
    def build_query(language: str, patterns: List[str], combine: str = "or") -> Dict[str, str]:
        """Build a tree-sitter query from templates or patterns.

        Args:
            language: Language name
            patterns: List of template names or custom patterns
            combine: How to combine patterns ("or" or "and")

        Returns:
            Combined query
        """
        from ..tools.query_builder import build_compound_query

        query = build_compound_query(language, patterns, combine)
        return {
            "language": language,
            "query": query,
        }

    @mcp_server.tool()
    def adapt_query(query: str, from_language: str, to_language: str) -> Dict[str, str]:
        """Adapt a query from one language to another.

        Args:
            query: Original query string
            from_language: Source language
            to_language: Target language

        Returns:
            Adapted query
        """
        from ..tools.query_builder import adapt_query_for_language

        adapted = adapt_query_for_language(query, from_language, to_language)
        return {
            "original_language": from_language,
            "target_language": to_language,
            "original_query": query,
            "adapted_query": adapted,
        }

    @mcp_server.tool()
    def get_node_types(language: str) -> Dict[str, str]:
        """Get descriptions of common node types for a language.

        Args:
            language: Language name

        Returns:
            Dictionary of node types and descriptions
        """
        from ..tools.query_builder import describe_node_types

        return describe_node_types(language)

    # Analysis Tools
    @mcp_server.tool()
    def get_symbols(
        project: str, file_path: str, symbol_types: Optional[List[str]] = None
    ) -> Dict[str, List[Dict[str, Any]]]:
        """Extract symbols from a file.

        Args:
            project: Project name
            file_path: Path to the file
            symbol_types: Types of symbols to extract (functions, classes, imports, etc.)

        Returns:
            Dictionary of symbols by type
        """
        from ..tools.analysis import extract_symbols

        return extract_symbols(project_registry.get_project(project), file_path, language_registry, symbol_types)

    @mcp_server.tool()
    def analyze_project(project: str, scan_depth: int = 3, ctx: Optional[Any] = None) -> Dict[str, Any]:
        """Analyze overall project structure.

        Args:
            project: Project name
            scan_depth: Depth of detailed analysis (higher is slower)
            ctx: Optional MCP context for progress reporting

        Returns:
            Project analysis
        """
        from ..tools.analysis import analyze_project_structure

        return analyze_project_structure(project_registry.get_project(project), language_registry, scan_depth, ctx)

    @mcp_server.tool()
    def get_dependencies(project: str, file_path: str) -> Dict[str, List[str]]:
        """Find dependencies of a file.

        Args:
            project: Project name
            file_path: Path to the file

        Returns:
            Dictionary of imports/includes
        """
        from ..tools.analysis import find_dependencies

        return find_dependencies(
            project_registry.get_project(project),
            file_path,
            language_registry,
        )

    @mcp_server.tool()
    def analyze_complexity(project: str, file_path: str) -> Dict[str, Any]:
        """Analyze code complexity.

        Args:
            project: Project name
            file_path: Path to the file

        Returns:
            Complexity metrics
        """
        from ..tools.analysis import analyze_code_complexity

        return analyze_code_complexity(
            project_registry.get_project(project),
            file_path,
            language_registry,
        )

    @mcp_server.tool()
    def find_similar_code(
        project: str,
        snippet: str,
        language: Optional[str] = None,
        threshold: float = 0.8,
        max_results: int = 10,
    ) -> List[Dict[str, Any]]:
        """Find similar code to a snippet.

        Args:
            project: Project name
            snippet: Code snippet to find
            language: Language of the snippet
            threshold: Similarity threshold (0.0-1.0)
            max_results: Maximum number of results

        Returns:
            List of similar code locations
        """
        # This is a simple implementation that uses text search
        from ..tools.search import search_text

        # Clean the snippet to handle potential whitespace differences
        clean_snippet = snippet.strip()

        # Map language names to file extensions
        extension_map = {
            "python": "py",
            "javascript": "js",
            "typescript": "ts",
            "rust": "rs",
            "go": "go",
            "java": "java",
            "c": "c",
            "cpp": "cpp",
            "ruby": "rb",
            "swift": "swift",
            "kotlin": "kt",
        }

        # Get the appropriate file extension for the language
        extension = extension_map.get(language, language) if language else None
        file_pattern = f"**/*.{extension}" if extension else None

        return search_text(
            project_registry.get_project(project),
            clean_snippet,
            file_pattern=file_pattern,
            max_results=max_results,
            case_sensitive=False,  # Ignore case differences
            whole_word=False,  # Allow partial matches
            use_regex=False,  # Simple text search is more reliable for this case
        )

    @mcp_server.tool()
    def find_usage(
        project: str,
        symbol: str,
        file_path: Optional[str] = None,
        language: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Find usage of a symbol.

        Args:
            project: Project name
            symbol: Symbol name to find
            file_path: Optional file to look in (for local symbols)
            language: Language to search in

        Returns:
            List of usage locations
        """
        # Detect language if not provided but file_path is
        if not language and file_path:
            language = language_registry.language_for_file(file_path)

        if not language:
            raise ValueError("Either language or file_path must be provided")

        # Build a query to find references to the symbol
        query = f"""
        (
          (identifier) @reference
          (#eq? @reference "{symbol}")
        )
        """

        from ..tools.search import query_code

        return query_code(
            project_registry.get_project(project), query, language_registry, tree_cache, file_path, language
        )

    # Cache Management
    @mcp_server.tool()
    def clear_cache(project: Optional[str] = None, file_path: Optional[str] = None) -> Dict[str, str]:
        """Clear the parse tree cache.

        Args:
            project: Optional project to clear cache for
            file_path: Optional specific file to clear cache for

        Returns:
            Status message
        """
        if project and file_path:
            # Clear cache for specific file
            project_obj = project_registry.get_project(project)
            abs_path = project_obj.get_file_path(file_path)
            tree_cache.invalidate(abs_path)
            message = f"Cache cleared for {file_path} in project {project}"
        elif project:
            # No per-project invalidation is available, so the entire cache is
            # cleared and the message says so
            tree_cache.invalidate()
            message = f"Cache cleared for project {project} (entire cache invalidated)"
        else:
            # Clear entire cache
            tree_cache.invalidate()
            message = "All caches cleared"

        return {"status": "success", "message": message}

    # Debug Tools
    @mcp_server.tool()
    def diagnose_config(config_path: str) -> Dict[str, Any]:
        """Diagnose issues with YAML configuration loading.

        Args:
            config_path: Path to YAML config file

        Returns:
            Diagnostic information
        """
        from ..tools.debug import diagnose_yaml_config

        return diagnose_yaml_config(config_path)

    # Register Prompts
    _register_prompts(mcp_server, container)


def _register_prompts(mcp_server: Any, container: DependencyContainer) -> None:
    """Register all prompt templates with dependency injection.

    Args:
        mcp_server: MCP server instance
        container: Dependency container
    """
    # Get dependencies
    project_registry = container.project_registry
    language_registry = container.language_registry

    @mcp_server.prompt()
    def code_review(project: str, file_path: str) -> str:
        """Create a prompt for reviewing a code file"""
        from ..tools.analysis import extract_symbols
        from ..tools.file_operations import get_file_content

        project_obj = project_registry.get_project(project)
        content = get_file_content(project_obj, file_path)
        language = language_registry.language_for_file(file_path)

        # Get structure information
        structure = ""
        try:
            symbols = extract_symbols(project_obj, file_path, language_registry)

            if "functions" in symbols and symbols["functions"]:
                structure += "\nFunctions:\n"
                for func in symbols["functions"]:
                    structure += f"- {func['name']}\n"

            if "classes" in symbols and symbols["classes"]:
                structure += "\nClasses:\n"
                for cls in symbols["classes"]:
                    structure += f"- {cls['name']}\n"
        except Exception:
            pass

        return f"""
        Please review this {language} code file:

        ```{language}
        {content}
        ```

        {structure}

        Focus on:
        1. Code clarity and organization
        2. Potential bugs or issues
        3. Performance considerations
        4. Best practices for {language}
        """

    @mcp_server.prompt()
    def explain_code(project: str, file_path: str, focus: Optional[str] = None) -> str:
        """Create a prompt for explaining a code file"""
        from ..tools.file_operations import get_file_content

        project_obj = project_registry.get_project(project)
        content = get_file_content(project_obj, file_path)
        language = language_registry.language_for_file(file_path)

        focus_prompt = ""
        if focus:
            focus_prompt = f"\nPlease focus specifically on explaining: {focus}"

        return f"""
        Please explain this {language} code file:

        ```{language}
        {content}
        ```

        Provide a clear explanation of:
        1. What this code does
        2. How it's structured
        3. Any important patterns or techniques used
        {focus_prompt}
        """

    @mcp_server.prompt()
    def explain_tree_sitter_query() -> str:
        """Create a prompt explaining tree-sitter query syntax"""
        return """
        Tree-sitter queries use S-expression syntax to match patterns in code.

        Basic query syntax:
        - `(node_type)` - Match nodes of a specific type
        - `(node_type field: (child_type))` - Match nodes with specific field relationships
        - `@name` - Capture a node with a name
        - `#predicate` - Apply additional constraints

        Example query for Python functions:
        ```
        (function_definition
          name: (identifier) @function.name
          parameters: (parameters) @function.params
          body: (block) @function.body) @function.def
        ```

        Please write a tree-sitter query to find:
        """

    @mcp_server.prompt()
    def suggest_improvements(project: str, file_path: str) -> str:
        """Create a prompt for suggesting code improvements"""
        from ..tools.analysis import analyze_code_complexity
        from ..tools.file_operations import get_file_content

        project_obj = project_registry.get_project(project)
        content = get_file_content(project_obj, file_path)
        language = language_registry.language_for_file(file_path)

        try:
            complexity = analyze_code_complexity(project_obj, file_path, language_registry)
            complexity_info = f"""
            Code metrics:
            - Line count: {complexity["line_count"]}
            - Code lines: {complexity["code_lines"]}
            - Comment lines: {complexity["comment_lines"]}
            - Comment ratio: {complexity["comment_ratio"]:.1%}
            - Functions: {complexity["function_count"]}
            - Classes: {complexity["class_count"]}
            - Avg. function length: {complexity["avg_function_lines"]} lines
            - Cyclomatic complexity: {complexity["cyclomatic_complexity"]}
            """
        except Exception:
            complexity_info = ""

        return f"""
        Please suggest improvements for this {language} code:

        ```{language}
        {content}
        ```

        {complexity_info}

        Suggest specific, actionable improvements for:
        1. Code quality and readability
        2. Performance optimization
        3. Error handling and robustness
        4. Following {language} best practices

        Where possible, provide code examples of your suggestions.
        """

    @mcp_server.prompt()
    def project_overview(project: str) -> str:
        """Create a prompt for a project overview analysis"""
        from ..tools.analysis import analyze_project_structure

        project_obj = project_registry.get_project(project)

        try:
            analysis = analyze_project_structure(project_obj, language_registry)

            languages_str = "\n".join(f"- {lang}: {count} files" for lang, count in analysis["languages"].items())

            entry_points_str = (
                "\n".join(f"- {entry['path']} ({entry['language']})" for entry in analysis["entry_points"])
                if analysis["entry_points"]
                else "None detected"
            )

            build_files_str = (
                "\n".join(f"- {file['path']} ({file['type']})" for file in analysis["build_files"])
                if analysis["build_files"]
                else "None detected"
            )

        except Exception:
            languages_str = "Error analyzing languages"
            entry_points_str = "Error detecting entry points"
            build_files_str = "Error detecting build files"

        return f"""
        Please analyze this codebase:

        Project name: {project_obj.name}
        Path: {project_obj.root_path}

        Languages:
        {languages_str}

        Possible entry points:
        {entry_points_str}

        Build configuration:
        {build_files_str}

        Based on this information, please:
        1. Provide an overview of what this project seems to be
        2. Identify the main components and their relationships
        3. Suggest where to start exploring the codebase
        4. Identify any patterns or architectural approaches used
        """

```
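
As a usage sketch, wiring these registrations into a server could look like the following. This is a minimal sketch, not the repository's confirmed bootstrap: the `FastMCP` import path comes from the MCP Python SDK, and a no-argument `DependencyContainer()` constructor is an assumption.

```python
# Hypothetical bootstrap: build the DI container, create the MCP server, and
# hand both to register_tools() so every tool and prompt above is attached.
from mcp.server.fastmcp import FastMCP

from mcp_server_tree_sitter.di import DependencyContainer
from mcp_server_tree_sitter.tools.registration import register_tools

mcp_server = FastMCP("mcp-server-tree-sitter")
container = DependencyContainer()  # assumed default constructor
register_tools(mcp_server, container)
mcp_server.run()  # stdio transport by default in the MCP SDK
```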