This is page 3 of 14. Use http://codebase.md/oraios/serena?lines=true&page={x} to view the full context. # Directory Structure ``` ├── .devcontainer │ └── devcontainer.json ├── .dockerignore ├── .env.example ├── .github │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE │ │ ├── config.yml │ │ ├── feature_request.md │ │ └── issue--bug--performance-problem--question-.md │ └── workflows │ ├── codespell.yml │ ├── docker.yml │ ├── junie.yml │ ├── lint_and_docs.yaml │ ├── publish.yml │ └── pytest.yml ├── .gitignore ├── .serena │ ├── memories │ │ ├── adding_new_language_support_guide.md │ │ ├── serena_core_concepts_and_architecture.md │ │ ├── serena_repository_structure.md │ │ └── suggested_commands.md │ └── project.yml ├── .vscode │ └── settings.json ├── CHANGELOG.md ├── CLAUDE.md ├── compose.yaml ├── CONTRIBUTING.md ├── docker_build_and_run.sh ├── DOCKER.md ├── Dockerfile ├── docs │ ├── custom_agent.md │ └── serena_on_chatgpt.md ├── flake.lock ├── flake.nix ├── lessons_learned.md ├── LICENSE ├── llms-install.md ├── public │ └── .gitignore ├── pyproject.toml ├── README.md ├── resources │ ├── serena-icons.cdr │ ├── serena-logo-dark-mode.svg │ ├── serena-logo.cdr │ ├── serena-logo.svg │ └── vscode_sponsor_logo.png ├── roadmap.md ├── scripts │ ├── agno_agent.py │ ├── demo_run_tools.py │ ├── gen_prompt_factory.py │ ├── mcp_server.py │ ├── print_mode_context_options.py │ └── print_tool_overview.py ├── src │ ├── interprompt │ │ ├── __init__.py │ │ ├── .syncCommitId.remote │ │ ├── .syncCommitId.this │ │ ├── jinja_template.py │ │ ├── multilang_prompt.py │ │ ├── prompt_factory.py │ │ └── util │ │ ├── __init__.py │ │ └── class_decorators.py │ ├── README.md │ ├── serena │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── agno.py │ │ ├── analytics.py │ │ ├── cli.py │ │ ├── code_editor.py │ │ ├── config │ │ │ ├── __init__.py │ │ │ ├── context_mode.py │ │ │ └── serena_config.py │ │ ├── constants.py │ │ ├── dashboard.py │ │ ├── generated │ │ │ └── generated_prompt_factory.py │ │ ├── gui_log_viewer.py │ │ ├── mcp.py │ │ ├── project.py │ │ ├── prompt_factory.py │ │ ├── resources │ │ │ ├── config │ │ │ │ ├── contexts │ │ │ │ │ ├── agent.yml │ │ │ │ │ ├── chatgpt.yml │ │ │ │ │ ├── codex.yml │ │ │ │ │ ├── context.template.yml │ │ │ │ │ ├── desktop-app.yml │ │ │ │ │ ├── ide-assistant.yml │ │ │ │ │ └── oaicompat-agent.yml │ │ │ │ ├── internal_modes │ │ │ │ │ └── jetbrains.yml │ │ │ │ ├── modes │ │ │ │ │ ├── editing.yml │ │ │ │ │ ├── interactive.yml │ │ │ │ │ ├── mode.template.yml │ │ │ │ │ ├── no-onboarding.yml │ │ │ │ │ ├── onboarding.yml │ │ │ │ │ ├── one-shot.yml │ │ │ │ │ └── planning.yml │ │ │ │ └── prompt_templates │ │ │ │ ├── simple_tool_outputs.yml │ │ │ │ └── system_prompt.yml │ │ │ ├── dashboard │ │ │ │ ├── dashboard.js │ │ │ │ ├── index.html │ │ │ │ ├── jquery.min.js │ │ │ │ ├── serena-icon-16.png │ │ │ │ ├── serena-icon-32.png │ │ │ │ ├── serena-icon-48.png │ │ │ │ ├── serena-logs-dark-mode.png │ │ │ │ └── serena-logs.png │ │ │ ├── project.template.yml │ │ │ └── serena_config.template.yml │ │ ├── symbol.py │ │ ├── text_utils.py │ │ ├── tools │ │ │ ├── __init__.py │ │ │ ├── cmd_tools.py │ │ │ ├── config_tools.py │ │ │ ├── file_tools.py │ │ │ ├── jetbrains_plugin_client.py │ │ │ ├── jetbrains_tools.py │ │ │ ├── memory_tools.py │ │ │ ├── symbol_tools.py │ │ │ ├── tools_base.py │ │ │ └── workflow_tools.py │ │ └── util │ │ ├── class_decorators.py │ │ ├── exception.py │ │ ├── file_system.py │ │ ├── general.py │ │ ├── git.py │ │ ├── inspection.py │ │ ├── logging.py │ │ ├── shell.py │ │ └── thread.py │ └── solidlsp │ ├── 
__init__.py │ ├── .gitignore │ ├── language_servers │ │ ├── al_language_server.py │ │ ├── bash_language_server.py │ │ ├── clangd_language_server.py │ │ ├── clojure_lsp.py │ │ ├── common.py │ │ ├── csharp_language_server.py │ │ ├── dart_language_server.py │ │ ├── eclipse_jdtls.py │ │ ├── elixir_tools │ │ │ ├── __init__.py │ │ │ ├── elixir_tools.py │ │ │ └── README.md │ │ ├── elm_language_server.py │ │ ├── erlang_language_server.py │ │ ├── gopls.py │ │ ├── intelephense.py │ │ ├── jedi_server.py │ │ ├── kotlin_language_server.py │ │ ├── lua_ls.py │ │ ├── marksman.py │ │ ├── nixd_ls.py │ │ ├── omnisharp │ │ │ ├── initialize_params.json │ │ │ ├── runtime_dependencies.json │ │ │ └── workspace_did_change_configuration.json │ │ ├── omnisharp.py │ │ ├── perl_language_server.py │ │ ├── pyright_server.py │ │ ├── r_language_server.py │ │ ├── ruby_lsp.py │ │ ├── rust_analyzer.py │ │ ├── solargraph.py │ │ ├── sourcekit_lsp.py │ │ ├── terraform_ls.py │ │ ├── typescript_language_server.py │ │ ├── vts_language_server.py │ │ └── zls.py │ ├── ls_config.py │ ├── ls_exceptions.py │ ├── ls_handler.py │ ├── ls_logger.py │ ├── ls_request.py │ ├── ls_types.py │ ├── ls_utils.py │ ├── ls.py │ ├── lsp_protocol_handler │ │ ├── lsp_constants.py │ │ ├── lsp_requests.py │ │ ├── lsp_types.py │ │ └── server.py │ ├── settings.py │ └── util │ ├── subprocess_util.py │ └── zip.py ├── test │ ├── __init__.py │ ├── conftest.py │ ├── resources │ │ └── repos │ │ ├── al │ │ │ └── test_repo │ │ │ ├── app.json │ │ │ └── src │ │ │ ├── Codeunits │ │ │ │ ├── CustomerMgt.Codeunit.al │ │ │ │ └── PaymentProcessorImpl.Codeunit.al │ │ │ ├── Enums │ │ │ │ └── CustomerType.Enum.al │ │ │ ├── Interfaces │ │ │ │ └── IPaymentProcessor.Interface.al │ │ │ ├── Pages │ │ │ │ ├── CustomerCard.Page.al │ │ │ │ └── CustomerList.Page.al │ │ │ ├── TableExtensions │ │ │ │ └── Item.TableExt.al │ │ │ └── Tables │ │ │ └── Customer.Table.al │ │ ├── bash │ │ │ └── test_repo │ │ │ ├── config.sh │ │ │ ├── main.sh │ │ │ └── utils.sh │ │ ├── clojure │ │ │ └── test_repo │ │ │ ├── deps.edn │ │ │ └── src │ │ │ └── test_app │ │ │ ├── core.clj │ │ │ └── utils.clj │ │ ├── csharp │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── Models │ │ │ │ └── Person.cs │ │ │ ├── Program.cs │ │ │ ├── serena.sln │ │ │ └── TestProject.csproj │ │ ├── dart │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── lib │ │ │ │ ├── helper.dart │ │ │ │ ├── main.dart │ │ │ │ └── models.dart │ │ │ └── pubspec.yaml │ │ ├── elixir │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── lib │ │ │ │ ├── examples.ex │ │ │ │ ├── ignored_dir │ │ │ │ │ └── ignored_module.ex │ │ │ │ ├── models.ex │ │ │ │ ├── services.ex │ │ │ │ ├── test_repo.ex │ │ │ │ └── utils.ex │ │ │ ├── mix.exs │ │ │ ├── mix.lock │ │ │ ├── scripts │ │ │ │ └── build_script.ex │ │ │ └── test │ │ │ ├── models_test.exs │ │ │ └── test_repo_test.exs │ │ ├── elm │ │ │ └── test_repo │ │ │ ├── elm.json │ │ │ ├── Main.elm │ │ │ └── Utils.elm │ │ ├── erlang │ │ │ └── test_repo │ │ │ ├── hello.erl │ │ │ ├── ignored_dir │ │ │ │ └── ignored_module.erl │ │ │ ├── include │ │ │ │ ├── records.hrl │ │ │ │ └── types.hrl │ │ │ ├── math_utils.erl │ │ │ ├── rebar.config │ │ │ ├── src │ │ │ │ ├── app.erl │ │ │ │ ├── models.erl │ │ │ │ ├── services.erl │ │ │ │ └── utils.erl │ │ │ └── test │ │ │ ├── models_tests.erl │ │ │ └── utils_tests.erl │ │ ├── go │ │ │ └── test_repo │ │ │ └── main.go │ │ ├── java │ │ │ └── test_repo │ │ │ ├── pom.xml │ │ │ └── src │ │ │ └── main │ │ │ └── java │ │ │ └── test_repo │ │ │ ├── Main.java │ │ │ ├── Model.java │ │ │ ├── ModelUser.java │ │ 
│ └── Utils.java │ │ ├── kotlin │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── build.gradle.kts │ │ │ └── src │ │ │ └── main │ │ │ └── kotlin │ │ │ └── test_repo │ │ │ ├── Main.kt │ │ │ ├── Model.kt │ │ │ ├── ModelUser.kt │ │ │ └── Utils.kt │ │ ├── lua │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── main.lua │ │ │ ├── src │ │ │ │ ├── calculator.lua │ │ │ │ └── utils.lua │ │ │ └── tests │ │ │ └── test_calculator.lua │ │ ├── markdown │ │ │ └── test_repo │ │ │ ├── api.md │ │ │ ├── CONTRIBUTING.md │ │ │ ├── guide.md │ │ │ └── README.md │ │ ├── nix │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── default.nix │ │ │ ├── flake.nix │ │ │ ├── lib │ │ │ │ └── utils.nix │ │ │ ├── modules │ │ │ │ └── example.nix │ │ │ └── scripts │ │ │ └── hello.sh │ │ ├── perl │ │ │ └── test_repo │ │ │ ├── helper.pl │ │ │ └── main.pl │ │ ├── php │ │ │ └── test_repo │ │ │ ├── helper.php │ │ │ ├── index.php │ │ │ └── simple_var.php │ │ ├── python │ │ │ └── test_repo │ │ │ ├── .gitignore │ │ │ ├── custom_test │ │ │ │ ├── __init__.py │ │ │ │ └── advanced_features.py │ │ │ ├── examples │ │ │ │ ├── __init__.py │ │ │ │ └── user_management.py │ │ │ ├── ignore_this_dir_with_postfix │ │ │ │ └── ignored_module.py │ │ │ ├── scripts │ │ │ │ ├── __init__.py │ │ │ │ └── run_app.py │ │ │ └── test_repo │ │ │ ├── __init__.py │ │ │ ├── complex_types.py │ │ │ ├── models.py │ │ │ ├── name_collisions.py │ │ │ ├── nested_base.py │ │ │ ├── nested.py │ │ │ ├── overloaded.py │ │ │ ├── services.py │ │ │ ├── utils.py │ │ │ └── variables.py │ │ ├── r │ │ │ └── test_repo │ │ │ ├── .Rbuildignore │ │ │ ├── DESCRIPTION │ │ │ ├── examples │ │ │ │ └── analysis.R │ │ │ ├── NAMESPACE │ │ │ └── R │ │ │ ├── models.R │ │ │ └── utils.R │ │ ├── ruby │ │ │ └── test_repo │ │ │ ├── .solargraph.yml │ │ │ ├── examples │ │ │ │ └── user_management.rb │ │ │ ├── lib.rb │ │ │ ├── main.rb │ │ │ ├── models.rb │ │ │ ├── nested.rb │ │ │ ├── services.rb │ │ │ └── variables.rb │ │ ├── rust │ │ │ ├── test_repo │ │ │ │ ├── Cargo.lock │ │ │ │ ├── Cargo.toml │ │ │ │ └── src │ │ │ │ ├── lib.rs │ │ │ │ └── main.rs │ │ │ └── test_repo_2024 │ │ │ ├── Cargo.lock │ │ │ ├── Cargo.toml │ │ │ └── src │ │ │ ├── lib.rs │ │ │ └── main.rs │ │ ├── swift │ │ │ └── test_repo │ │ │ ├── Package.swift │ │ │ └── src │ │ │ ├── main.swift │ │ │ └── utils.swift │ │ ├── terraform │ │ │ └── test_repo │ │ │ ├── data.tf │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── variables.tf │ │ ├── typescript │ │ │ └── test_repo │ │ │ ├── .serena │ │ │ │ └── project.yml │ │ │ ├── index.ts │ │ │ ├── tsconfig.json │ │ │ └── use_helper.ts │ │ └── zig │ │ └── test_repo │ │ ├── .gitignore │ │ ├── build.zig │ │ ├── src │ │ │ ├── calculator.zig │ │ │ ├── main.zig │ │ │ └── math_utils.zig │ │ └── zls.json │ ├── serena │ │ ├── __init__.py │ │ ├── __snapshots__ │ │ │ └── test_symbol_editing.ambr │ │ ├── config │ │ │ ├── __init__.py │ │ │ └── test_serena_config.py │ │ ├── test_edit_marker.py │ │ ├── test_mcp.py │ │ ├── test_serena_agent.py │ │ ├── test_symbol_editing.py │ │ ├── test_symbol.py │ │ ├── test_text_utils.py │ │ ├── test_tool_parameter_types.py │ │ └── util │ │ ├── test_exception.py │ │ └── test_file_system.py │ └── solidlsp │ ├── al │ │ └── test_al_basic.py │ ├── bash │ │ ├── __init__.py │ │ └── test_bash_basic.py │ ├── clojure │ │ ├── __init__.py │ │ └── test_clojure_basic.py │ ├── csharp │ │ └── test_csharp_basic.py │ ├── dart │ │ ├── __init__.py │ │ └── test_dart_basic.py │ ├── elixir │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── test_elixir_basic.py │ │ ├── test_elixir_ignored_dirs.py │ │ ├── 
test_elixir_integration.py │ │ └── test_elixir_symbol_retrieval.py │ ├── elm │ │ └── test_elm_basic.py │ ├── erlang │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── test_erlang_basic.py │ │ ├── test_erlang_ignored_dirs.py │ │ └── test_erlang_symbol_retrieval.py │ ├── go │ │ └── test_go_basic.py │ ├── java │ │ └── test_java_basic.py │ ├── kotlin │ │ └── test_kotlin_basic.py │ ├── lua │ │ └── test_lua_basic.py │ ├── markdown │ │ ├── __init__.py │ │ └── test_markdown_basic.py │ ├── nix │ │ └── test_nix_basic.py │ ├── perl │ │ └── test_perl_basic.py │ ├── php │ │ └── test_php_basic.py │ ├── python │ │ ├── test_python_basic.py │ │ ├── test_retrieval_with_ignored_dirs.py │ │ └── test_symbol_retrieval.py │ ├── r │ │ ├── __init__.py │ │ └── test_r_basic.py │ ├── ruby │ │ ├── test_ruby_basic.py │ │ └── test_ruby_symbol_retrieval.py │ ├── rust │ │ ├── test_rust_2024_edition.py │ │ └── test_rust_basic.py │ ├── swift │ │ └── test_swift_basic.py │ ├── terraform │ │ └── test_terraform_basic.py │ ├── typescript │ │ └── test_typescript_basic.py │ ├── util │ │ └── test_zip.py │ └── zig │ └── test_zig_basic.py └── uv.lock ``` # Files -------------------------------------------------------------------------------- /test/solidlsp/r/test_r_basic.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Basic tests for R Language Server integration 3 | """ 4 | 5 | import os 6 | from pathlib import Path 7 | 8 | import pytest 9 | 10 | from solidlsp import SolidLanguageServer 11 | from solidlsp.ls_config import Language 12 | 13 | 14 | @pytest.mark.r 15 | class TestRLanguageServer: 16 | """Test basic functionality of the R language server.""" 17 | 18 | @pytest.mark.parametrize("language_server", [Language.R], indirect=True) 19 | @pytest.mark.parametrize("repo_path", [Language.R], indirect=True) 20 | def test_server_initialization(self, language_server: SolidLanguageServer, repo_path: Path): 21 | """Test that the R language server initializes properly.""" 22 | assert language_server is not None 23 | assert language_server.language_id == "r" 24 | assert language_server.is_running() 25 | assert Path(language_server.language_server.repository_root_path).resolve() == repo_path.resolve() 26 | 27 | @pytest.mark.parametrize("language_server", [Language.R], indirect=True) 28 | def test_symbol_retrieval(self, language_server: SolidLanguageServer): 29 | """Test R document symbol extraction.""" 30 | all_symbols, _root_symbols = language_server.request_document_symbols(os.path.join("R", "utils.R")) 31 | 32 | # Should find the three exported functions 33 | function_symbols = [s for s in all_symbols if s.get("kind") == 12] # Function kind 34 | assert len(function_symbols) >= 3 35 | 36 | # Check that we found the expected functions 37 | function_names = {s.get("name") for s in function_symbols} 38 | expected_functions = {"calculate_mean", "process_data", "create_data_frame"} 39 | assert expected_functions.issubset(function_names), f"Expected functions {expected_functions} but found {function_names}" 40 | 41 | @pytest.mark.parametrize("language_server", [Language.R], indirect=True) 42 | def test_find_definition_across_files(self, language_server: SolidLanguageServer): 43 | """Test finding function definitions across files.""" 44 | analysis_file = os.path.join("examples", "analysis.R") 45 | 46 | # In analysis.R line 7: create_data_frame(n = 50) 47 | # The function create_data_frame is defined in R/utils.R 48 | # Find definition of create_data_frame function call (0-indexed: line 6) 49 
| definition_location_list = language_server.request_definition(analysis_file, 6, 17) # cursor on 'create_data_frame' 50 | 51 | assert definition_location_list, f"Expected non-empty definition_location_list but got {definition_location_list=}" 52 | assert len(definition_location_list) >= 1 53 | definition_location = definition_location_list[0] 54 | assert definition_location["uri"].endswith("utils.R") 55 | # Definition should be around line 37 (0-indexed: 36) where create_data_frame is defined 56 | assert definition_location["range"]["start"]["line"] >= 35 57 | 58 | @pytest.mark.parametrize("language_server", [Language.R], indirect=True) 59 | def test_find_references_across_files(self, language_server: SolidLanguageServer): 60 | """Test finding function references across files.""" 61 | analysis_file = os.path.join("examples", "analysis.R") 62 | 63 | # Test from usage side: find references to calculate_mean from its usage in analysis.R 64 | # In analysis.R line 13: calculate_mean(clean_data$value) 65 | # calculate_mean function call is at line 13 (0-indexed: line 12) 66 | references = language_server.request_references(analysis_file, 12, 15) # cursor on 'calculate_mean' 67 | 68 | assert references, f"Expected non-empty references for calculate_mean but got {references=}" 69 | 70 | # Must find the definition in utils.R (cross-file reference) 71 | reference_files = [ref["uri"] for ref in references] 72 | assert any(uri.endswith("utils.R") for uri in reference_files), "Cross-file reference to definition in utils.R not found" 73 | 74 | # Verify we actually found the right location in utils.R 75 | utils_refs = [ref for ref in references if ref["uri"].endswith("utils.R")] 76 | assert len(utils_refs) >= 1, "Should find at least one reference in utils.R" 77 | utils_ref = utils_refs[0] 78 | # Should be around line 6 where calculate_mean is defined (0-indexed: line 5) 79 | assert ( 80 | utils_ref["range"]["start"]["line"] == 5 81 | ), f"Expected reference at line 5 in utils.R, got line {utils_ref['range']['start']['line']}" 82 | 83 | def test_file_matching(self): 84 | """Test that R files are properly matched.""" 85 | from solidlsp.ls_config import Language 86 | 87 | matcher = Language.R.get_source_fn_matcher() 88 | 89 | assert matcher.is_relevant_filename("script.R") 90 | assert matcher.is_relevant_filename("analysis.r") 91 | assert not matcher.is_relevant_filename("script.py") 92 | assert not matcher.is_relevant_filename("README.md") 93 | 94 | def test_r_language_enum(self): 95 | """Test R language enum value.""" 96 | assert Language.R == "r" 97 | assert str(Language.R) == "r" 98 | ``` -------------------------------------------------------------------------------- /test/resources/repos/python/test_repo/scripts/run_app.py: -------------------------------------------------------------------------------- ```python 1 | #!/usr/bin/env python 2 | """ 3 | Main entry point script for the test_repo application. 4 | 5 | This script demonstrates how a typical application entry point would be structured, 6 | with command-line arguments, configuration loading, and service initialization. 
7 | """ 8 | 9 | import argparse 10 | import json 11 | import logging 12 | import os 13 | import sys 14 | from typing import Any 15 | 16 | # Add parent directory to path to make imports work 17 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) 18 | 19 | from test_repo.models import Item, User 20 | from test_repo.services import ItemService, UserService 21 | 22 | # Configure logging 23 | logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") 24 | logger = logging.getLogger(__name__) 25 | 26 | 27 | def parse_args(): 28 | """Parse command line arguments.""" 29 | parser = argparse.ArgumentParser(description="Test Repo Application") 30 | 31 | parser.add_argument("--config", type=str, default="config.json", help="Path to configuration file") 32 | 33 | parser.add_argument("--mode", choices=["user", "item", "both"], default="both", help="Operation mode") 34 | 35 | parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") 36 | 37 | return parser.parse_args() 38 | 39 | 40 | def load_config(config_path: str) -> dict[str, Any]: 41 | """Load configuration from a JSON file.""" 42 | if not os.path.exists(config_path): 43 | logger.warning(f"Configuration file not found: {config_path}") 44 | return {} 45 | 46 | try: 47 | with open(config_path, encoding="utf-8") as f: 48 | return json.load(f) 49 | except json.JSONDecodeError: 50 | logger.error(f"Invalid JSON in configuration file: {config_path}") 51 | return {} 52 | except Exception as e: 53 | logger.error(f"Error loading configuration: {e}") 54 | return {} 55 | 56 | 57 | def create_sample_users(service: UserService, count: int = 3) -> list[User]: 58 | """Create sample users for demonstration.""" 59 | users = [] 60 | 61 | # Create admin user 62 | admin = service.create_user(name="Admin User", email="[email protected]", roles=["admin"]) 63 | users.append(admin) 64 | 65 | # Create regular users 66 | for i in range(count - 1): 67 | user = service.create_user(name=f"User {i + 1}", email=f"user{i + 1}@example.com", roles=["user"]) 68 | users.append(user) 69 | 70 | return users 71 | 72 | 73 | def create_sample_items(service: ItemService, count: int = 5) -> list[Item]: 74 | """Create sample items for demonstration.""" 75 | categories = ["Electronics", "Books", "Clothing", "Food", "Other"] 76 | items = [] 77 | 78 | for i in range(count): 79 | category = categories[i % len(categories)] 80 | item = service.create_item(name=f"Item {i + 1}", price=10.0 * (i + 1), category=category) 81 | items.append(item) 82 | 83 | return items 84 | 85 | 86 | def run_user_operations(service: UserService, config: dict[str, Any]) -> None: 87 | """Run operations related to users.""" 88 | logger.info("Running user operations") 89 | 90 | # Get configuration 91 | user_count = config.get("user_count", 3) 92 | 93 | # Create users 94 | users = create_sample_users(service, user_count) 95 | logger.info(f"Created {len(users)} users") 96 | 97 | # Demonstrate some operations 98 | for user in users: 99 | logger.info(f"User: {user.name} (ID: {user.id})") 100 | 101 | # Access a method to demonstrate method calls 102 | if user.has_role("admin"): 103 | logger.info(f"{user.name} is an admin") 104 | 105 | # Lookup a user 106 | found_user = service.get_user(users[0].id) 107 | if found_user: 108 | logger.info(f"Found user: {found_user.name}") 109 | 110 | 111 | def run_item_operations(service: ItemService, config: dict[str, Any]) -> None: 112 | """Run operations related to items.""" 113 | 
logger.info("Running item operations") 114 | 115 | # Get configuration 116 | item_count = config.get("item_count", 5) 117 | 118 | # Create items 119 | items = create_sample_items(service, item_count) 120 | logger.info(f"Created {len(items)} items") 121 | 122 | # Demonstrate some operations 123 | total_price = 0.0 124 | for item in items: 125 | price_display = item.get_display_price() 126 | logger.info(f"Item: {item.name}, Price: {price_display}") 127 | total_price += item.price 128 | 129 | logger.info(f"Total price of all items: ${total_price:.2f}") 130 | 131 | 132 | def main(): 133 | """Main entry point for the application.""" 134 | # Parse command line arguments 135 | args = parse_args() 136 | 137 | # Configure logging level 138 | if args.verbose: 139 | logging.getLogger().setLevel(logging.DEBUG) 140 | 141 | logger.info("Starting Test Repo Application") 142 | 143 | # Load configuration 144 | config = load_config(args.config) 145 | logger.debug(f"Loaded configuration: {config}") 146 | 147 | # Initialize services 148 | user_service = UserService() 149 | item_service = ItemService() 150 | 151 | # Run operations based on mode 152 | if args.mode in ("user", "both"): 153 | run_user_operations(user_service, config) 154 | 155 | if args.mode in ("item", "both"): 156 | run_item_operations(item_service, config) 157 | 158 | logger.info("Application completed successfully") 159 | 160 | 161 | item_reference = Item(id="1", name="Item 1", price=10.0, category="Electronics") 162 | 163 | if __name__ == "__main__": 164 | main() 165 | ``` -------------------------------------------------------------------------------- /src/serena/resources/config/prompt_templates/system_prompt.yml: -------------------------------------------------------------------------------- ```yaml 1 | # The system prompt template. Note that many clients will not allow configuration of the actual system prompt, 2 | # in which case this prompt will be given as a regular message on the call of a simple tool which the agent 3 | # is encouraged (via the tool description) to call at the beginning of the conversation. 4 | prompts: 5 | system_prompt: | 6 | You are a professional coding agent concerned with one particular codebase. You have 7 | access to semantic coding tools on which you rely heavily for all your work, as well as collection of memory 8 | files containing general information about the codebase. You operate in a resource-efficient and intelligent manner, always 9 | keeping in mind to not read or generate content that is not needed for the task at hand. 10 | 11 | When reading code in order to answer a user question or task, you should try reading only the necessary code. 12 | Some tasks may require you to understand the architecture of large parts of the codebase, while for others, 13 | it may be enough to read a small set of symbols or a single file. 14 | Generally, you should avoid reading entire files unless it is absolutely necessary, instead relying on 15 | intelligent step-by-step acquisition of information. {% if 'ToolMarkerSymbolicRead' in available_markers %}However, if you already read a file, it does not make 16 | sense to further analyse it with the symbolic tools (except for the `find_referencing_symbols` tool), 17 | as you already have the information.{% endif %} 18 | 19 | I WILL BE SERIOUSLY UPSET IF YOU READ ENTIRE FILES WITHOUT NEED! 20 | {% if 'ToolMarkerSymbolicRead' in available_markers %} 21 | CONSIDER INSTEAD USING THE OVERVIEW TOOL AND SYMBOLIC TOOLS TO READ ONLY THE NECESSARY CODE FIRST! 
22 |     I WILL BE EVEN MORE UPSET IF AFTER HAVING READ AN ENTIRE FILE YOU KEEP READING THE SAME CONTENT WITH THE SYMBOLIC TOOLS!
23 |     THE PURPOSE OF THE SYMBOLIC TOOLS IS TO HAVE TO READ LESS CODE, NOT READ THE SAME CONTENT MULTIPLE TIMES!
24 |     {% endif %}
25 | 
26 |     You can achieve the intelligent reading of code by using the symbolic tools for getting an overview of symbols and
27 |     the relations between them, and then only reading the bodies of symbols that are necessary to answer the question
28 |     or complete the task.
29 |     You can use the standard tools like list_dir, find_file and search_for_pattern if you need to.
30 |     When tools allow it, you pass the `relative_path` parameter to restrict the search to a specific file or directory.
31 |     For some tools, `relative_path` can only be a file path, so make sure to properly read the tool descriptions.
32 |     {% if 'search_for_pattern' in available_tools %}
33 |     If you are unsure about a symbol's name or location{% if 'find_symbol' in available_tools %} (to the extent that substring_matching for the symbol name is not enough){% endif %}, you can use the `search_for_pattern` tool, which allows fast
34 |     and flexible search for patterns in the codebase.{% if 'ToolMarkerSymbolicRead' in available_markers %}This way you can first find candidates for symbols or files,
35 |     and then proceed with the symbolic tools.{% endif %}
36 |     {% endif %}
37 | 
38 |     {% if 'ToolMarkerSymbolicRead' in available_markers %}
39 |     Symbols are identified by their `name_path` and `relative_path`, see the description of the `find_symbol` tool for more details
40 |     on how the `name_path` matches symbols.
41 |     You can get information about available symbols by using the `get_symbols_overview` tool for finding top-level symbols in a file,
42 |     or by using `find_symbol` if you already know the symbol's name path. You generally try to read as little code as possible
43 |     while still solving your task, meaning you only read the bodies when you need to, and after you have found the symbol you want to edit.
44 |     For example, if you are working with python code and already know that you need to read the body of the constructor of the class Foo, you can directly
45 |     use `find_symbol` with the name path `Foo/__init__` and `include_body=True`. If you don't know yet which methods in `Foo` you need to read or edit,
46 |     you can use `find_symbol` with the name path `Foo`, `include_body=False` and `depth=1` to get all (top-level) methods of `Foo` before proceeding
47 |     to read the desired methods with `include_body=True`.
48 |     You can understand relationships between symbols by using the `find_referencing_symbols` tool.
49 |     {% endif %}
50 | 
51 |     {% if 'read_memory' in available_tools %}
52 |     You generally have access to memories and it may be useful for you to read them, but also only if they help you
53 |     to answer the question or complete the task. You can infer which memories are relevant to the current task by reading
54 |     the memory names and descriptions.
55 |     {% endif %}
56 | 
57 |     The context and modes of operation are described below. From them you can infer how to interact with your user
58 |     and which tasks and kinds of interactions are expected of you.
59 | 60 | Context description: 61 | {{ context_system_prompt }} 62 | 63 | Modes descriptions: 64 | {% for prompt in mode_system_prompts %} 65 | - {{ prompt }} 66 | {% endfor %} 67 | ``` -------------------------------------------------------------------------------- /test/solidlsp/elixir/test_elixir_basic.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Basic integration tests for the Elixir language server functionality. 3 | 4 | These tests validate the functionality of the language server APIs 5 | like request_references using the test repository. 6 | """ 7 | 8 | import os 9 | 10 | import pytest 11 | 12 | from solidlsp import SolidLanguageServer 13 | from solidlsp.ls_config import Language 14 | 15 | from . import NEXTLS_UNAVAILABLE, NEXTLS_UNAVAILABLE_REASON 16 | 17 | # These marks will be applied to all tests in this module 18 | pytestmark = [pytest.mark.elixir, pytest.mark.skipif(NEXTLS_UNAVAILABLE, reason=f"Next LS not available: {NEXTLS_UNAVAILABLE_REASON}")] 19 | 20 | 21 | class TestElixirBasic: 22 | """Basic Elixir language server functionality tests.""" 23 | 24 | @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 25 | def test_request_references_function_definition(self, language_server: SolidLanguageServer): 26 | """Test finding references to a function definition.""" 27 | file_path = os.path.join("lib", "models.ex") 28 | symbols = language_server.request_document_symbols(file_path) 29 | 30 | # Find the User module's 'new' function 31 | user_new_symbol = None 32 | for symbol in symbols[0]: # Top level symbols 33 | if symbol.get("name") == "User" and symbol.get("kind") == 2: # Module 34 | for child in symbol.get("children", []): 35 | if child.get("name", "").startswith("def new(") and child.get("kind") == 12: # Function 36 | user_new_symbol = child 37 | break 38 | break 39 | 40 | if not user_new_symbol or "selectionRange" not in user_new_symbol: 41 | pytest.skip("User.new function or its selectionRange not found") 42 | 43 | sel_start = user_new_symbol["selectionRange"]["start"] 44 | references = language_server.request_references(file_path, sel_start["line"], sel_start["character"]) 45 | 46 | assert references is not None 47 | assert len(references) > 0 48 | 49 | # Should find at least one reference (the definition itself) 50 | found_definition = any(ref["uri"].endswith("models.ex") for ref in references) 51 | assert found_definition, "Should find the function definition" 52 | 53 | @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 54 | def test_request_references_create_user_function(self, language_server: SolidLanguageServer): 55 | """Test finding references to create_user function.""" 56 | file_path = os.path.join("lib", "services.ex") 57 | symbols = language_server.request_document_symbols(file_path) 58 | 59 | # Find the UserService module's 'create_user' function 60 | create_user_symbol = None 61 | for symbol in symbols[0]: # Top level symbols 62 | if symbol.get("name") == "UserService" and symbol.get("kind") == 2: # Module 63 | for child in symbol.get("children", []): 64 | if child.get("name", "").startswith("def create_user(") and child.get("kind") == 12: # Function 65 | create_user_symbol = child 66 | break 67 | break 68 | 69 | if not create_user_symbol or "selectionRange" not in create_user_symbol: 70 | pytest.skip("UserService.create_user function or its selectionRange not found") 71 | 72 | sel_start = create_user_symbol["selectionRange"]["start"] 73 | 
references = language_server.request_references(file_path, sel_start["line"], sel_start["character"]) 74 | 75 | assert references is not None 76 | assert len(references) > 0 77 | 78 | @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 79 | def test_request_referencing_symbols_function(self, language_server: SolidLanguageServer): 80 | """Test finding symbols that reference a specific function.""" 81 | file_path = os.path.join("lib", "models.ex") 82 | symbols = language_server.request_document_symbols(file_path) 83 | 84 | # Find the User module's 'new' function 85 | user_new_symbol = None 86 | for symbol in symbols[0]: # Top level symbols 87 | if symbol.get("name") == "User" and symbol.get("kind") == 2: # Module 88 | for child in symbol.get("children", []): 89 | if child.get("name", "").startswith("def new(") and child.get("kind") == 12: # Function 90 | user_new_symbol = child 91 | break 92 | break 93 | 94 | if not user_new_symbol or "selectionRange" not in user_new_symbol: 95 | pytest.skip("User.new function or its selectionRange not found") 96 | 97 | sel_start = user_new_symbol["selectionRange"]["start"] 98 | referencing_symbols = language_server.request_referencing_symbols(file_path, sel_start["line"], sel_start["character"]) 99 | 100 | assert referencing_symbols is not None 101 | 102 | @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 103 | def test_timeout_enumeration_bug(self, language_server: SolidLanguageServer): 104 | """Test that enumeration doesn't timeout (regression test).""" 105 | # This should complete without timing out 106 | symbols = language_server.request_document_symbols("lib/models.ex") 107 | assert symbols is not None 108 | 109 | # Test multiple symbol requests in succession 110 | for _ in range(3): 111 | symbols = language_server.request_document_symbols("lib/services.ex") 112 | assert symbols is not None 113 | ``` -------------------------------------------------------------------------------- /src/serena/tools/workflow_tools.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Tools supporting the general workflow of the agent 3 | """ 4 | 5 | import json 6 | import platform 7 | 8 | from serena.tools import Tool, ToolMarkerDoesNotRequireActiveProject, ToolMarkerOptional 9 | 10 | 11 | class CheckOnboardingPerformedTool(Tool): 12 | """ 13 | Checks whether project onboarding was already performed. 14 | """ 15 | 16 | def apply(self) -> str: 17 | """ 18 | Checks whether project onboarding was already performed. 19 | You should always call this tool before beginning to actually work on the project/after activating a project, 20 | but after calling the initial instructions tool. 21 | """ 22 | from .memory_tools import ListMemoriesTool 23 | 24 | list_memories_tool = self.agent.get_tool(ListMemoriesTool) 25 | memories = json.loads(list_memories_tool.apply()) 26 | if len(memories) == 0: 27 | return ( 28 | "Onboarding not performed yet (no memories available). " 29 | + "You should perform onboarding by calling the `onboarding` tool before proceeding with the task." 30 | ) 31 | else: 32 | return f"""The onboarding was already performed, below is the list of available memories. 33 | Do not read them immediately, just remember that they exist and that you can read them later, if it is necessary 34 | for the current task. 35 | Some memories may be based on previous conversations, others may be general for the current project. 
36 | You should be able to tell which one you need based on the name of the memory. 37 | 38 | {memories}""" 39 | 40 | 41 | class OnboardingTool(Tool): 42 | """ 43 | Performs onboarding (identifying the project structure and essential tasks, e.g. for testing or building). 44 | """ 45 | 46 | def apply(self) -> str: 47 | """ 48 | Call this tool if onboarding was not performed yet. 49 | You will call this tool at most once per conversation. 50 | 51 | :return: instructions on how to create the onboarding information 52 | """ 53 | system = platform.system() 54 | return self.prompt_factory.create_onboarding_prompt(system=system) 55 | 56 | 57 | class ThinkAboutCollectedInformationTool(Tool): 58 | """ 59 | Thinking tool for pondering the completeness of collected information. 60 | """ 61 | 62 | def apply(self) -> str: 63 | """ 64 | Think about the collected information and whether it is sufficient and relevant. 65 | This tool should ALWAYS be called after you have completed a non-trivial sequence of searching steps like 66 | find_symbol, find_referencing_symbols, search_files_for_pattern, read_file, etc. 67 | """ 68 | return self.prompt_factory.create_think_about_collected_information() 69 | 70 | 71 | class ThinkAboutTaskAdherenceTool(Tool): 72 | """ 73 | Thinking tool for determining whether the agent is still on track with the current task. 74 | """ 75 | 76 | def apply(self) -> str: 77 | """ 78 | Think about the task at hand and whether you are still on track. 79 | Especially important if the conversation has been going on for a while and there 80 | has been a lot of back and forth. 81 | 82 | This tool should ALWAYS be called before you insert, replace, or delete code. 83 | """ 84 | return self.prompt_factory.create_think_about_task_adherence() 85 | 86 | 87 | class ThinkAboutWhetherYouAreDoneTool(Tool): 88 | """ 89 | Thinking tool for determining whether the task is truly completed. 90 | """ 91 | 92 | def apply(self) -> str: 93 | """ 94 | Whenever you feel that you are done with what the user has asked for, it is important to call this tool. 95 | """ 96 | return self.prompt_factory.create_think_about_whether_you_are_done() 97 | 98 | 99 | class SummarizeChangesTool(Tool, ToolMarkerOptional): 100 | """ 101 | Provides instructions for summarizing the changes made to the codebase. 102 | """ 103 | 104 | def apply(self) -> str: 105 | """ 106 | Summarize the changes you have made to the codebase. 107 | This tool should always be called after you have fully completed any non-trivial coding task, 108 | but only after the think_about_whether_you_are_done call. 109 | """ 110 | return self.prompt_factory.create_summarize_changes() 111 | 112 | 113 | class PrepareForNewConversationTool(Tool): 114 | """ 115 | Provides instructions for preparing for a new conversation (in order to continue with the necessary context). 116 | """ 117 | 118 | def apply(self) -> str: 119 | """ 120 | Instructions for preparing for a new conversation. This tool should only be called on explicit user request. 121 | """ 122 | return self.prompt_factory.create_prepare_for_new_conversation() 123 | 124 | 125 | class InitialInstructionsTool(Tool, ToolMarkerDoesNotRequireActiveProject, ToolMarkerOptional): 126 | """ 127 | Gets the initial instructions for the current project. 128 | Should only be used in settings where the system prompt cannot be set, 129 | e.g. in clients you have no control over, like Claude Desktop. 130 | """ 131 | 132 | def apply(self) -> str: 133 | """ 134 | Get the initial instructions for the current coding project. 
135 | If you haven't received instructions on how to use Serena's tools in the system prompt, 136 | you should always call this tool before starting to work (including using any other tool) on any programming task, 137 | the only exception being when you are asked to call `activate_project`, which you should then call before. 138 | """ 139 | return self.agent.create_system_prompt() 140 | ``` -------------------------------------------------------------------------------- /test/serena/util/test_exception.py: -------------------------------------------------------------------------------- ```python 1 | import os 2 | from unittest.mock import MagicMock, Mock, patch 3 | 4 | import pytest 5 | 6 | from serena.util.exception import is_headless_environment, show_fatal_exception_safe 7 | 8 | 9 | class TestHeadlessEnvironmentDetection: 10 | """Test class for headless environment detection functionality.""" 11 | 12 | def test_is_headless_no_display(self): 13 | """Test that environment without DISPLAY is detected as headless on Linux.""" 14 | with patch("sys.platform", "linux"): 15 | with patch.dict(os.environ, {}, clear=True): 16 | assert is_headless_environment() is True 17 | 18 | def test_is_headless_ssh_connection(self): 19 | """Test that SSH sessions are detected as headless.""" 20 | with patch("sys.platform", "linux"): 21 | with patch.dict(os.environ, {"SSH_CONNECTION": "192.168.1.1 22 192.168.1.2 22", "DISPLAY": ":0"}): 22 | assert is_headless_environment() is True 23 | 24 | with patch.dict(os.environ, {"SSH_CLIENT": "192.168.1.1 22 22", "DISPLAY": ":0"}): 25 | assert is_headless_environment() is True 26 | 27 | def test_is_headless_wsl(self): 28 | """Test that WSL environment is detected as headless.""" 29 | # Skip this test on Windows since os.uname doesn't exist 30 | if not hasattr(os, "uname"): 31 | pytest.skip("os.uname not available on this platform") 32 | 33 | with patch("sys.platform", "linux"): 34 | with patch("os.uname") as mock_uname: 35 | mock_uname.return_value = Mock(release="5.15.153.1-microsoft-standard-WSL2") 36 | with patch.dict(os.environ, {"DISPLAY": ":0"}): 37 | assert is_headless_environment() is True 38 | 39 | def test_is_headless_docker(self): 40 | """Test that Docker containers are detected as headless.""" 41 | with patch("sys.platform", "linux"): 42 | # Test with CI environment variable 43 | with patch.dict(os.environ, {"CI": "true", "DISPLAY": ":0"}): 44 | assert is_headless_environment() is True 45 | 46 | # Test with CONTAINER environment variable 47 | with patch.dict(os.environ, {"CONTAINER": "docker", "DISPLAY": ":0"}): 48 | assert is_headless_environment() is True 49 | 50 | # Test with .dockerenv file 51 | with patch("os.path.exists") as mock_exists: 52 | mock_exists.return_value = True 53 | with patch.dict(os.environ, {"DISPLAY": ":0"}): 54 | assert is_headless_environment() is True 55 | 56 | def test_is_not_headless_windows(self): 57 | """Test that Windows is never detected as headless.""" 58 | with patch("sys.platform", "win32"): 59 | # Even without DISPLAY, Windows should not be headless 60 | with patch.dict(os.environ, {}, clear=True): 61 | assert is_headless_environment() is False 62 | 63 | 64 | class TestShowFatalExceptionSafe: 65 | """Test class for safe fatal exception display functionality.""" 66 | 67 | @patch("serena.util.exception.is_headless_environment", return_value=True) 68 | @patch("serena.util.exception.log") 69 | def test_show_fatal_exception_safe_headless(self, mock_log, mock_is_headless): 70 | """Test that GUI is not attempted in headless 
environment.""" 71 | test_exception = ValueError("Test error") 72 | 73 | # The import should never happen in headless mode 74 | with patch("serena.gui_log_viewer.show_fatal_exception") as mock_show_gui: 75 | show_fatal_exception_safe(test_exception) 76 | mock_show_gui.assert_not_called() 77 | 78 | # Verify debug log about skipping GUI 79 | mock_log.debug.assert_called_once_with("Skipping GUI error display in headless environment") 80 | 81 | @patch("serena.util.exception.is_headless_environment", return_value=False) 82 | @patch("serena.util.exception.log") 83 | def test_show_fatal_exception_safe_with_gui(self, mock_log, mock_is_headless): 84 | """Test that GUI is attempted when not in headless environment.""" 85 | test_exception = ValueError("Test error") 86 | 87 | # Mock the GUI function 88 | with patch("serena.gui_log_viewer.show_fatal_exception") as mock_show_gui: 89 | show_fatal_exception_safe(test_exception) 90 | mock_show_gui.assert_called_once_with(test_exception) 91 | 92 | @patch("serena.util.exception.is_headless_environment", return_value=False) 93 | @patch("serena.util.exception.log") 94 | def test_show_fatal_exception_safe_gui_failure(self, mock_log, mock_is_headless): 95 | """Test graceful handling when GUI display fails.""" 96 | test_exception = ValueError("Test error") 97 | gui_error = ImportError("No module named 'tkinter'") 98 | 99 | # Mock the GUI function to raise an exception 100 | with patch("serena.gui_log_viewer.show_fatal_exception", side_effect=gui_error): 101 | show_fatal_exception_safe(test_exception) 102 | 103 | # Verify debug log about GUI failure 104 | mock_log.debug.assert_called_with(f"Failed to show GUI error dialog: {gui_error}") 105 | 106 | def test_show_fatal_exception_safe_prints_to_stderr(self): 107 | """Test that exceptions are always printed to stderr.""" 108 | test_exception = ValueError("Test error message") 109 | 110 | with patch("sys.stderr", new_callable=MagicMock) as mock_stderr: 111 | with patch("serena.util.exception.is_headless_environment", return_value=True): 112 | with patch("serena.util.exception.log"): 113 | show_fatal_exception_safe(test_exception) 114 | 115 | # Verify print was called with the correct arguments 116 | mock_stderr.write.assert_any_call("Fatal exception: Test error message") 117 | ``` -------------------------------------------------------------------------------- /src/serena/resources/config/prompt_templates/simple_tool_outputs.yml: -------------------------------------------------------------------------------- ```yaml 1 | # Some of Serena's tools are just outputting a fixed text block without doing anything else. 2 | # Such tools are meant to encourage the agent to think in a certain way, to stay on track 3 | # and so on. The (templates for) outputs of these tools are contained here. 4 | prompts: 5 | onboarding_prompt: | 6 | You are viewing the project for the first time. 7 | Your task is to assemble relevant high-level information about the project which 8 | will be saved to memory files in the following steps. 9 | The information should be sufficient to understand what the project is about, 10 | and the most important commands for developing code. 11 | The project is being developed on the system: {{ system }}. 12 | 13 | You need to identify at least the following information: 14 | * the project's purpose 15 | * the tech stack used 16 | * the code style and conventions used (including naming, type hints, docstrings, etc.) 17 | * which commands to run when a task is completed (linting, formatting, testing, etc.) 
18 | * the rough structure of the codebase 19 | * the commands for testing, formatting, and linting 20 | * the commands for running the entrypoints of the project 21 | * the util commands for the system, like `git`, `ls`, `cd`, `grep`, `find`, etc. Keep in mind that the system is {{ system }}, 22 | so the commands might be different than on a regular unix system. 23 | * whether there are particular guidelines, styles, design patterns, etc. that one should know about 24 | 25 | This list is not exhaustive, you can add more information if you think it is relevant. 26 | 27 | For doing that, you will need to acquire information about the project with the corresponding tools. 28 | Read only the necessary files and directories to avoid loading too much data into memory. 29 | If you cannot find everything you need from the project itself, you should ask the user for more information. 30 | 31 | After collecting all the information, you will use the `write_memory` tool (in multiple calls) to save it to various memory files. 32 | A particularly important memory file will be the `suggested_commands.md` file, which should contain 33 | a list of commands that the user should know about to develop code in this project. 34 | Moreover, you should create memory files for the style and conventions and a dedicated memory file for 35 | what should be done when a task is completed. 36 | **Important**: after done with the onboarding task, remember to call the `write_memory` to save the collected information! 37 | 38 | think_about_collected_information: | 39 | Have you collected all the information you need for solving the current task? If not, can the missing information be acquired by using the available tools, 40 | in particular the tools related to symbol discovery? Or do you need to ask the user for more information? 41 | Think about it step by step and give a summary of the missing information and how it could be acquired. 42 | 43 | think_about_task_adherence: | 44 | Are you deviating from the task at hand? Do you need any additional information to proceed? 45 | Have you loaded all relevant memory files to see whether your implementation is fully aligned with the 46 | code style, conventions, and guidelines of the project? If not, adjust your implementation accordingly 47 | before modifying any code into the codebase. 48 | Note that it is better to stop and ask the user for clarification 49 | than to perform large changes which might not be aligned with the user's intentions. 50 | If you feel like the conversation is deviating too much from the original task, apologize and suggest to the user 51 | how to proceed. If the conversation became too long, create a summary of the current progress and suggest to the user 52 | to start a new conversation based on that summary. 53 | 54 | think_about_whether_you_are_done: | 55 | Have you already performed all the steps required by the task? Is it appropriate to run tests and linting, and if so, 56 | have you done that already? Is it appropriate to adjust non-code files like documentation and config and have you done that already? 57 | Should new tests be written to cover the changes? 58 | Note that a task that is just about exploring the codebase does not require running tests or linting. 59 | Read the corresponding memory files to see what should be done when a task is completed. 60 | 61 | summarize_changes: | 62 | Summarize all the changes you have made to the codebase over the course of the conversation. 63 | Explore the diff if needed (e.g. 
by using `git diff`) to ensure that you have not missed anything. 64 | Explain whether and how the changes are covered by tests. Explain how to best use the new code, how to understand it, 65 | which existing code it affects and interacts with. Are there any dangers (like potential breaking changes or potential new problems) 66 | that the user should be aware of? Should any new documentation be written or existing documentation updated? 67 | You can use tools to explore the codebase prior to writing the summary, but don't write any new code in this step until 68 | the summary is complete. 69 | 70 | prepare_for_new_conversation: | 71 | You have not yet completed the current task but we are running out of context. 72 | {mode_prepare_for_new_conversation} 73 | Imagine that you are handing over the task to another person who has access to the 74 | same tools and memory files as you do, but has not been part of the conversation so far. 75 | Write a summary that can be used in the next conversation to a memory file using the `write_memory` tool. 76 | ``` -------------------------------------------------------------------------------- /DOCKER.md: -------------------------------------------------------------------------------- ```markdown 1 | # Docker Setup for Serena (Experimental) 2 | 3 | ⚠️ **EXPERIMENTAL FEATURE**: The Docker setup for Serena is currently experimental and has several limitations. Please read this entire document before using Docker with Serena. 4 | 5 | ## Overview 6 | 7 | Docker support allows you to run Serena in an isolated container environment, which provides better security isolation for the shell tool and consistent dependencies across different systems. 8 | 9 | ## Benefits 10 | 11 | - **Safer shell tool execution**: Commands run in an isolated container environment 12 | - **Consistent dependencies**: No need to manage language servers and dependencies on your host system 13 | - **Cross-platform support**: Works consistently across Windows, macOS, and Linux 14 | 15 | ## Important Limitations and Caveats 16 | 17 | ### 1. Configuration File Conflicts 18 | 19 | ⚠️ **Critical**: Docker uses a separate configuration file (`serena_config.docker.yml`) to avoid path conflicts. When running in Docker: 20 | - Container paths will be stored in the configuration (e.g., `/workspaces/serena/...`) 21 | - These paths are incompatible with non-Docker usage 22 | - After using Docker, you cannot directly switch back to non-Docker usage without manual configuration adjustment 23 | 24 | ### 2. Project Activation Limitations 25 | 26 | - **Only mounted directories work**: Projects must be mounted as volumes to be accessible 27 | - Projects outside the mounted directories cannot be activated or accessed 28 | - Default setup only mounts the current directory 29 | 30 | ### 3. GUI Window Disabled 31 | 32 | - The GUI log window option is automatically disabled in Docker environments 33 | - Use the web dashboard instead (see below) 34 | 35 | ### 4. Dashboard Port Configuration 36 | 37 | The web dashboard runs on port 24282 (0x5EDA) by default. You can configure this using environment variables: 38 | 39 | ```bash 40 | # Use default ports 41 | docker-compose up serena 42 | 43 | # Use custom ports 44 | SERENA_DASHBOARD_PORT=8080 docker-compose up serena 45 | ``` 46 | 47 | ⚠️ **Note**: If the local port is occupied, you'll need to specify a different port using the environment variable. 48 | 49 | ### 5. 
Line Ending Issues on Windows 50 | 51 | ⚠️ **Windows Users**: Be aware of potential line ending inconsistencies: 52 | - Files edited within the Docker container may use Unix line endings (LF) 53 | - Your Windows system may expect Windows line endings (CRLF) 54 | - This can cause issues with version control and text editors 55 | - Configure your Git settings appropriately: `git config core.autocrlf true` 56 | 57 | ## Quick Start 58 | 59 | ### Using Docker Compose (Recommended) 60 | 61 | 1. **Production mode** (for using Serena as MCP server): 62 | ```bash 63 | docker-compose up serena 64 | ``` 65 | 66 | 2. **Development mode** (with source code mounted): 67 | ```bash 68 | docker-compose up serena-dev 69 | ``` 70 | 71 | Note: Edit the `compose.yaml` file to customize volume mounts for your projects. 72 | 73 | ### Using Docker directly 74 | 75 | ```bash 76 | # Build the image 77 | docker build -t serena . 78 | 79 | # Run with current directory mounted 80 | docker run -it --rm \ 81 | -v "$(pwd)":/workspace \ 82 | -p 9121:9121 \ 83 | -p 24282:24282 \ 84 | -e SERENA_DOCKER=1 \ 85 | serena 86 | ``` 87 | 88 | ### Using Docker Compose with Merge Compose files 89 | 90 | To use Docker Compose with merge files, you can create a `compose.override.yml` file to customize the configuration: 91 | 92 | ```yaml 93 | services: 94 | serena: 95 | # To work with projects, you must mount them as volumes: 96 | volumes: 97 | - ./my-project:/workspace/my-project 98 | - /path/to/another/project:/workspace/another-project 99 | # Add the context for the IDE assistant option: 100 | command: 101 | - "uv run --directory . serena-mcp-server --transport sse --port 9121 --host 0.0.0.0 --context ide-assistant" 102 | ``` 103 | 104 | See the [Docker Merge Compose files documentation](https://docs.docker.com/compose/how-tos/multiple-compose-files/merge/) for more details on using merge files. 
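
### Connecting an MCP Client

Once the container exposes the MCP server over SSE (as in the `ide-assistant` example above), an MCP client can be pointed at the mapped port. The snippet below is only an illustrative sketch: the exact configuration keys depend on your client, and the `/sse` path assumes Serena's default SSE endpoint.

```json
{
  "mcpServers": {
    "serena": {
      "url": "http://localhost:9121/sse"
    }
  }
}
```

Adjust the host and port if you changed the port mapping in your compose file.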
105 | 106 | ## Accessing the Dashboard 107 | 108 | Once running, access the web dashboard at: 109 | - Default: http://localhost:24282/dashboard 110 | - Custom port: http://localhost:${SERENA_DASHBOARD_PORT}/dashboard 111 | 112 | ## Volume Mounting 113 | 114 | To work with projects, you must mount them as volumes: 115 | 116 | ```yaml 117 | # In compose.yaml 118 | volumes: 119 | - ./my-project:/workspace/my-project 120 | - /path/to/another/project:/workspace/another-project 121 | ``` 122 | 123 | ## Environment Variables 124 | 125 | - `SERENA_DOCKER=1`: Set automatically to indicate Docker environment 126 | - `SERENA_PORT`: MCP server port (default: 9121) 127 | - `SERENA_DASHBOARD_PORT`: Web dashboard port (default: 24282) 128 | - `INTELEPHENSE_LICENSE_KEY`: License key for Intelephense PHP LSP premium features (optional) 129 | 130 | ## Troubleshooting 131 | 132 | ### Port Already in Use 133 | 134 | If you see "port already in use" errors: 135 | ```bash 136 | # Check what's using the port 137 | lsof -i :24282 # macOS/Linux 138 | netstat -ano | findstr :24282 # Windows 139 | 140 | # Use a different port 141 | SERENA_DASHBOARD_PORT=8080 docker-compose up serena 142 | ``` 143 | 144 | ### Configuration Issues 145 | 146 | If you need to reset Docker configuration: 147 | ```bash 148 | # Remove Docker-specific config 149 | rm serena_config.docker.yml 150 | 151 | # Serena will auto-generate a new one on next run 152 | ``` 153 | 154 | ### Project Access Issues 155 | 156 | Ensure projects are properly mounted: 157 | - Check volume mounts in `docker-compose.yaml` 158 | - Use absolute paths for external projects 159 | - Verify permissions on mounted directories 160 | 161 | ## Migration Path 162 | 163 | To switch between Docker and non-Docker usage: 164 | 165 | 1. **Docker to Non-Docker**: 166 | - Manually edit project paths in `serena_config.yml` 167 | - Change container paths to host paths 168 | - Or use separate config files for each environment 169 | 170 | 2. **Non-Docker to Docker**: 171 | - Projects will be re-registered with container paths 172 | - Original config remains unchanged 173 | 174 | ## Future Improvements 175 | 176 | We're working on: 177 | - Automatic config migration between environments 178 | - Better project path handling 179 | - Dynamic port allocation 180 | - Windows line-ending handling. 181 | ``` -------------------------------------------------------------------------------- /src/serena/analytics.py: -------------------------------------------------------------------------------- ```python 1 | from __future__ import annotations 2 | 3 | import logging 4 | import threading 5 | from abc import ABC, abstractmethod 6 | from collections import defaultdict 7 | from copy import copy 8 | from dataclasses import asdict, dataclass 9 | from enum import Enum 10 | 11 | from anthropic.types import MessageParam, MessageTokensCount 12 | from dotenv import load_dotenv 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | 17 | class TokenCountEstimator(ABC): 18 | @abstractmethod 19 | def estimate_token_count(self, text: str) -> int: 20 | """ 21 | Estimate the number of tokens in the given text. 22 | This is an abstract method that should be implemented by subclasses. 23 | """ 24 | 25 | 26 | class TiktokenCountEstimator(TokenCountEstimator): 27 | """ 28 | Approximate token count using tiktoken. 29 | """ 30 | 31 | def __init__(self, model_name: str = "gpt-4o"): 32 | """ 33 | The tokenizer will be downloaded on the first initialization, which may take some time. 
34 | 35 | :param model_name: see `tiktoken.model` to see available models. 36 | """ 37 | import tiktoken 38 | 39 | log.info(f"Loading tiktoken encoding for model {model_name}, this may take a while on the first run.") 40 | self._encoding = tiktoken.encoding_for_model(model_name) 41 | 42 | def estimate_token_count(self, text: str) -> int: 43 | return len(self._encoding.encode(text)) 44 | 45 | 46 | class AnthropicTokenCount(TokenCountEstimator): 47 | """ 48 | The exact count using the Anthropic API. 49 | Counting is free, but has a rate limit and will require an API key, 50 | (typically, set through an env variable). 51 | See https://docs.anthropic.com/en/docs/build-with-claude/token-counting 52 | """ 53 | 54 | def __init__(self, model_name: str = "claude-sonnet-4-20250514", api_key: str | None = None): 55 | import anthropic 56 | 57 | self._model_name = model_name 58 | if api_key is None: 59 | load_dotenv() 60 | self._anthropic_client = anthropic.Anthropic(api_key=api_key) 61 | 62 | def _send_count_tokens_request(self, text: str) -> MessageTokensCount: 63 | return self._anthropic_client.messages.count_tokens( 64 | model=self._model_name, 65 | messages=[MessageParam(role="user", content=text)], 66 | ) 67 | 68 | def estimate_token_count(self, text: str) -> int: 69 | return self._send_count_tokens_request(text).input_tokens 70 | 71 | 72 | _registered_token_estimator_instances_cache: dict[RegisteredTokenCountEstimator, TokenCountEstimator] = {} 73 | 74 | 75 | class RegisteredTokenCountEstimator(Enum): 76 | TIKTOKEN_GPT4O = "TIKTOKEN_GPT4O" 77 | ANTHROPIC_CLAUDE_SONNET_4 = "ANTHROPIC_CLAUDE_SONNET_4" 78 | 79 | @classmethod 80 | def get_valid_names(cls) -> list[str]: 81 | """ 82 | Get a list of all registered token count estimator names. 83 | """ 84 | return [estimator.name for estimator in cls] 85 | 86 | def _create_estimator(self) -> TokenCountEstimator: 87 | match self: 88 | case RegisteredTokenCountEstimator.TIKTOKEN_GPT4O: 89 | return TiktokenCountEstimator(model_name="gpt-4o") 90 | case RegisteredTokenCountEstimator.ANTHROPIC_CLAUDE_SONNET_4: 91 | return AnthropicTokenCount(model_name="claude-sonnet-4-20250514") 92 | case _: 93 | raise ValueError(f"Unknown token count estimator: {self.value}") 94 | 95 | def load_estimator(self) -> TokenCountEstimator: 96 | estimator_instance = _registered_token_estimator_instances_cache.get(self) 97 | if estimator_instance is None: 98 | estimator_instance = self._create_estimator() 99 | _registered_token_estimator_instances_cache[self] = estimator_instance 100 | return estimator_instance 101 | 102 | 103 | class ToolUsageStats: 104 | """ 105 | A class to record and manage tool usage statistics. 106 | """ 107 | 108 | def __init__(self, token_count_estimator: RegisteredTokenCountEstimator = RegisteredTokenCountEstimator.TIKTOKEN_GPT4O): 109 | self._token_count_estimator = token_count_estimator.load_estimator() 110 | self._token_estimator_name = token_count_estimator.value 111 | self._tool_stats: dict[str, ToolUsageStats.Entry] = defaultdict(ToolUsageStats.Entry) 112 | self._tool_stats_lock = threading.Lock() 113 | 114 | @property 115 | def token_estimator_name(self) -> str: 116 | """ 117 | Get the name of the registered token count estimator used. 
118 | """ 119 | return self._token_estimator_name 120 | 121 | @dataclass(kw_only=True) 122 | class Entry: 123 | num_times_called: int = 0 124 | input_tokens: int = 0 125 | output_tokens: int = 0 126 | 127 | def update_on_call(self, input_tokens: int, output_tokens: int) -> None: 128 | """ 129 | Update the entry with the number of tokens used for a single call. 130 | """ 131 | self.num_times_called += 1 132 | self.input_tokens += input_tokens 133 | self.output_tokens += output_tokens 134 | 135 | def _estimate_token_count(self, text: str) -> int: 136 | return self._token_count_estimator.estimate_token_count(text) 137 | 138 | def get_stats(self, tool_name: str) -> ToolUsageStats.Entry: 139 | """ 140 | Get (a copy of) the current usage statistics for a specific tool. 141 | """ 142 | with self._tool_stats_lock: 143 | return copy(self._tool_stats[tool_name]) 144 | 145 | def record_tool_usage(self, tool_name: str, input_str: str, output_str: str) -> None: 146 | input_tokens = self._estimate_token_count(input_str) 147 | output_tokens = self._estimate_token_count(output_str) 148 | with self._tool_stats_lock: 149 | entry = self._tool_stats[tool_name] 150 | entry.update_on_call(input_tokens, output_tokens) 151 | 152 | def get_tool_stats_dict(self) -> dict[str, dict[str, int]]: 153 | with self._tool_stats_lock: 154 | return {name: asdict(entry) for name, entry in self._tool_stats.items()} 155 | 156 | def clear(self) -> None: 157 | with self._tool_stats_lock: 158 | self._tool_stats.clear() 159 | ``` -------------------------------------------------------------------------------- /test/solidlsp/rust/test_rust_2024_edition.py: -------------------------------------------------------------------------------- ```python 1 | import os 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | from solidlsp.ls_config import Language 7 | from solidlsp.ls_utils import SymbolUtils 8 | from test.conftest import create_ls 9 | 10 | 11 | @pytest.mark.rust 12 | class TestRust2024EditionLanguageServer: 13 | @classmethod 14 | def setup_class(cls): 15 | """Set up the test class with the Rust 2024 edition test repository.""" 16 | cls.test_repo_2024_path = Path(__file__).parent.parent.parent / "resources" / "repos" / "rust" / "test_repo_2024" 17 | 18 | if not cls.test_repo_2024_path.exists(): 19 | pytest.skip("Rust 2024 edition test repository not found") 20 | 21 | # Create and start the language server for the 2024 edition repo 22 | cls.language_server = create_ls(Language.RUST, str(cls.test_repo_2024_path)) 23 | cls.language_server.start() 24 | 25 | @classmethod 26 | def teardown_class(cls): 27 | """Clean up the language server.""" 28 | if hasattr(cls, "language_server"): 29 | cls.language_server.stop() 30 | 31 | def test_find_references_raw(self) -> None: 32 | # Test finding references to the 'add' function defined in main.rs 33 | file_path = os.path.join("src", "main.rs") 34 | symbols = self.language_server.request_document_symbols(file_path) 35 | add_symbol = None 36 | for sym in symbols[0]: 37 | if sym.get("name") == "add": 38 | add_symbol = sym 39 | break 40 | assert add_symbol is not None, "Could not find 'add' function symbol in main.rs" 41 | sel_start = add_symbol["selectionRange"]["start"] 42 | refs = self.language_server.request_references(file_path, sel_start["line"], sel_start["character"]) 43 | # The add function should be referenced within main.rs itself (in the main function) 44 | assert any("main.rs" in ref.get("relativePath", "") for ref in refs), "main.rs should reference add function" 45 | 46 | def 
test_find_symbol(self) -> None: 47 | symbols = self.language_server.request_full_symbol_tree() 48 | assert SymbolUtils.symbol_tree_contains_name(symbols, "main"), "main function not found in symbol tree" 49 | assert SymbolUtils.symbol_tree_contains_name(symbols, "add"), "add function not found in symbol tree" 50 | assert SymbolUtils.symbol_tree_contains_name(symbols, "multiply"), "multiply function not found in symbol tree" 51 | assert SymbolUtils.symbol_tree_contains_name(symbols, "Calculator"), "Calculator struct not found in symbol tree" 52 | 53 | def test_find_referencing_symbols_multiply(self) -> None: 54 | # Find references to 'multiply' function defined in lib.rs 55 | file_path = os.path.join("src", "lib.rs") 56 | symbols = self.language_server.request_document_symbols(file_path) 57 | multiply_symbol = None 58 | for sym in symbols[0]: 59 | if sym.get("name") == "multiply": 60 | multiply_symbol = sym 61 | break 62 | assert multiply_symbol is not None, "Could not find 'multiply' function symbol in lib.rs" 63 | sel_start = multiply_symbol["selectionRange"]["start"] 64 | refs = self.language_server.request_references(file_path, sel_start["line"], sel_start["character"]) 65 | # The multiply function exists but may not be referenced anywhere, which is fine 66 | # This test just verifies we can find the symbol and request references without error 67 | assert isinstance(refs, list), "Should return a list of references (even if empty)" 68 | 69 | def test_find_calculator_struct_and_impl(self) -> None: 70 | # Test finding the Calculator struct and its impl block 71 | file_path = os.path.join("src", "lib.rs") 72 | symbols = self.language_server.request_document_symbols(file_path) 73 | 74 | # Find the Calculator struct 75 | calculator_struct = None 76 | calculator_impl = None 77 | for sym in symbols[0]: 78 | if sym.get("name") == "Calculator" and sym.get("kind") == 23: # Struct kind 79 | calculator_struct = sym 80 | elif sym.get("name") == "Calculator" and sym.get("kind") == 11: # Interface/Impl kind 81 | calculator_impl = sym 82 | 83 | assert calculator_struct is not None, "Could not find 'Calculator' struct symbol in lib.rs" 84 | 85 | # The struct should have the 'result' field 86 | struct_children = calculator_struct.get("children", []) 87 | field_names = [child.get("name") for child in struct_children] 88 | assert "result" in field_names, "Calculator struct should have 'result' field" 89 | 90 | # Find the impl block and check its methods 91 | if calculator_impl is not None: 92 | impl_children = calculator_impl.get("children", []) 93 | method_names = [child.get("name") for child in impl_children] 94 | assert "new" in method_names, "Calculator impl should have 'new' method" 95 | assert "add" in method_names, "Calculator impl should have 'add' method" 96 | assert "get_result" in method_names, "Calculator impl should have 'get_result' method" 97 | 98 | def test_overview_methods(self) -> None: 99 | symbols = self.language_server.request_full_symbol_tree() 100 | assert SymbolUtils.symbol_tree_contains_name(symbols, "main"), "main missing from overview" 101 | assert SymbolUtils.symbol_tree_contains_name(symbols, "add"), "add missing from overview" 102 | assert SymbolUtils.symbol_tree_contains_name(symbols, "multiply"), "multiply missing from overview" 103 | assert SymbolUtils.symbol_tree_contains_name(symbols, "Calculator"), "Calculator missing from overview" 104 | 105 | def test_rust_2024_edition_specific(self) -> None: 106 | # Verify we're actually working with the 2024 edition repository 107 | 
cargo_toml_path = self.test_repo_2024_path / "Cargo.toml" 108 | assert cargo_toml_path.exists(), "Cargo.toml should exist in test repository" 109 | 110 | with open(cargo_toml_path) as f: 111 | content = f.read() 112 | assert 'edition = "2024"' in content, "Should be using Rust 2024 edition" 113 | ``` -------------------------------------------------------------------------------- /test/serena/config/test_serena_config.py: -------------------------------------------------------------------------------- ```python 1 | import shutil 2 | import tempfile 3 | from pathlib import Path 4 | 5 | import pytest 6 | 7 | from serena.config.serena_config import ProjectConfig 8 | from solidlsp.ls_config import Language 9 | 10 | 11 | class TestProjectConfigAutogenerate: 12 | """Test class for ProjectConfig autogeneration functionality.""" 13 | 14 | def setup_method(self): 15 | """Set up test environment before each test method.""" 16 | # Create a temporary directory for testing 17 | self.test_dir = tempfile.mkdtemp() 18 | self.project_path = Path(self.test_dir) 19 | 20 | def teardown_method(self): 21 | """Clean up test environment after each test method.""" 22 | # Remove the temporary directory 23 | shutil.rmtree(self.test_dir) 24 | 25 | def test_autogenerate_empty_directory(self): 26 | """Test that autogenerate raises ValueError with helpful message for empty directory.""" 27 | with pytest.raises(ValueError) as exc_info: 28 | ProjectConfig.autogenerate(self.project_path, save_to_disk=False) 29 | 30 | error_message = str(exc_info.value) 31 | # Check that the error message contains all the key information 32 | assert "No source files found" in error_message 33 | assert str(self.project_path.resolve()) in error_message 34 | assert "To use Serena with this project" in error_message 35 | assert "Add source files in one of the supported languages" in error_message 36 | assert "Create a project configuration file manually" in error_message 37 | assert str(Path(".serena") / "project.yml") in error_message 38 | assert "Example project.yml:" in error_message 39 | assert f"project_name: {self.project_path.name}" in error_message 40 | assert "language: python" in error_message 41 | 42 | def test_autogenerate_with_python_files(self): 43 | """Test successful autogeneration with Python source files.""" 44 | # Create a Python file 45 | python_file = self.project_path / "main.py" 46 | python_file.write_text("def hello():\n print('Hello, world!')\n") 47 | 48 | # Run autogenerate 49 | config = ProjectConfig.autogenerate(self.project_path, save_to_disk=False) 50 | 51 | # Verify the configuration 52 | assert config.project_name == self.project_path.name 53 | assert config.language == Language.PYTHON 54 | 55 | def test_autogenerate_with_multiple_languages(self): 56 | """Test autogeneration picks dominant language when multiple are present.""" 57 | # Create files for multiple languages 58 | (self.project_path / "main.py").write_text("print('Python')") 59 | (self.project_path / "util.py").write_text("def util(): pass") 60 | (self.project_path / "small.js").write_text("console.log('JS');") 61 | 62 | # Run autogenerate - should pick Python as dominant 63 | config = ProjectConfig.autogenerate(self.project_path, save_to_disk=False) 64 | 65 | assert config.language == Language.PYTHON 66 | 67 | def test_autogenerate_saves_to_disk(self): 68 | """Test that autogenerate can save the configuration to disk.""" 69 | # Create a Go file 70 | go_file = self.project_path / "main.go" 71 | go_file.write_text("package main\n\nfunc main() {}\n") 72 
| 73 | # Run autogenerate with save_to_disk=True 74 | config = ProjectConfig.autogenerate(self.project_path, save_to_disk=True) 75 | 76 | # Verify the configuration file was created 77 | config_path = self.project_path / ".serena" / "project.yml" 78 | assert config_path.exists() 79 | 80 | # Verify the content 81 | assert config.language == Language.GO 82 | 83 | def test_autogenerate_nonexistent_path(self): 84 | """Test that autogenerate raises FileNotFoundError for non-existent path.""" 85 | non_existent = self.project_path / "does_not_exist" 86 | 87 | with pytest.raises(FileNotFoundError) as exc_info: 88 | ProjectConfig.autogenerate(non_existent, save_to_disk=False) 89 | 90 | assert "Project root not found" in str(exc_info.value) 91 | 92 | def test_autogenerate_with_gitignored_files_only(self): 93 | """Test autogenerate behavior when only gitignored files exist.""" 94 | # Create a .gitignore that ignores all Python files 95 | gitignore = self.project_path / ".gitignore" 96 | gitignore.write_text("*.py\n") 97 | 98 | # Create Python files that will be ignored 99 | (self.project_path / "ignored.py").write_text("print('ignored')") 100 | 101 | # Should still raise ValueError as no source files are detected 102 | with pytest.raises(ValueError) as exc_info: 103 | ProjectConfig.autogenerate(self.project_path, save_to_disk=False) 104 | 105 | assert "No source files found" in str(exc_info.value) 106 | 107 | def test_autogenerate_custom_project_name(self): 108 | """Test autogenerate with custom project name.""" 109 | # Create a TypeScript file 110 | ts_file = self.project_path / "index.ts" 111 | ts_file.write_text("const greeting: string = 'Hello';\n") 112 | 113 | # Run autogenerate with custom name 114 | custom_name = "my-custom-project" 115 | config = ProjectConfig.autogenerate(self.project_path, project_name=custom_name, save_to_disk=False) 116 | 117 | assert config.project_name == custom_name 118 | assert config.language == Language.TYPESCRIPT 119 | 120 | def test_autogenerate_error_message_format(self): 121 | """Test the specific format of the error message for better user experience.""" 122 | with pytest.raises(ValueError) as exc_info: 123 | ProjectConfig.autogenerate(self.project_path, save_to_disk=False) 124 | 125 | error_lines = str(exc_info.value).split("\n") 126 | 127 | # Verify the structure of the error message 128 | assert len(error_lines) >= 8 # Should have multiple lines of helpful information 129 | 130 | # Check for numbered instructions 131 | assert any("1." in line for line in error_lines) 132 | assert any("2." 
in line for line in error_lines) 133 | 134 | # Check for supported languages list 135 | assert any("Python" in line and "TypeScript" in line for line in error_lines) 136 | 137 | # Check example includes comment about language options 138 | assert any("# or typescript, java, csharp" in line for line in error_lines) 139 | ``` -------------------------------------------------------------------------------- /src/serena/dashboard.py: -------------------------------------------------------------------------------- ```python 1 | import os 2 | import socket 3 | import threading 4 | from collections.abc import Callable 5 | from typing import TYPE_CHECKING, Any 6 | 7 | from flask import Flask, Response, request, send_from_directory 8 | from pydantic import BaseModel 9 | from sensai.util import logging 10 | 11 | from serena.analytics import ToolUsageStats 12 | from serena.constants import SERENA_DASHBOARD_DIR 13 | from serena.util.logging import MemoryLogHandler 14 | 15 | if TYPE_CHECKING: 16 | from serena.agent import SerenaAgent 17 | 18 | log = logging.getLogger(__name__) 19 | 20 | # disable Werkzeug's logging to avoid cluttering the output 21 | logging.getLogger("werkzeug").setLevel(logging.WARNING) 22 | 23 | 24 | class RequestLog(BaseModel): 25 | start_idx: int = 0 26 | 27 | 28 | class ResponseLog(BaseModel): 29 | messages: list[str] 30 | max_idx: int 31 | active_project: str | None = None 32 | 33 | 34 | class ResponseToolNames(BaseModel): 35 | tool_names: list[str] 36 | 37 | 38 | class ResponseToolStats(BaseModel): 39 | stats: dict[str, dict[str, int]] 40 | 41 | 42 | class SerenaDashboardAPI: 43 | log = logging.getLogger(__qualname__) 44 | 45 | def __init__( 46 | self, 47 | memory_log_handler: MemoryLogHandler, 48 | tool_names: list[str], 49 | agent: "SerenaAgent", 50 | shutdown_callback: Callable[[], None] | None = None, 51 | tool_usage_stats: ToolUsageStats | None = None, 52 | ) -> None: 53 | self._memory_log_handler = memory_log_handler 54 | self._tool_names = tool_names 55 | self._agent = agent 56 | self._shutdown_callback = shutdown_callback 57 | self._app = Flask(__name__) 58 | self._tool_usage_stats = tool_usage_stats 59 | self._setup_routes() 60 | 61 | @property 62 | def memory_log_handler(self) -> MemoryLogHandler: 63 | return self._memory_log_handler 64 | 65 | def _setup_routes(self) -> None: 66 | # Static files 67 | @self._app.route("/dashboard/<path:filename>") 68 | def serve_dashboard(filename: str) -> Response: 69 | return send_from_directory(SERENA_DASHBOARD_DIR, filename) 70 | 71 | @self._app.route("/dashboard/") 72 | def serve_dashboard_index() -> Response: 73 | return send_from_directory(SERENA_DASHBOARD_DIR, "index.html") 74 | 75 | # API routes 76 | @self._app.route("/get_log_messages", methods=["POST"]) 77 | def get_log_messages() -> dict[str, Any]: 78 | request_data = request.get_json() 79 | if not request_data: 80 | request_log = RequestLog() 81 | else: 82 | request_log = RequestLog.model_validate(request_data) 83 | 84 | result = self._get_log_messages(request_log) 85 | return result.model_dump() 86 | 87 | @self._app.route("/get_tool_names", methods=["GET"]) 88 | def get_tool_names() -> dict[str, Any]: 89 | result = self._get_tool_names() 90 | return result.model_dump() 91 | 92 | @self._app.route("/get_tool_stats", methods=["GET"]) 93 | def get_tool_stats_route() -> dict[str, Any]: 94 | result = self._get_tool_stats() 95 | return result.model_dump() 96 | 97 | @self._app.route("/clear_tool_stats", methods=["POST"]) 98 | def clear_tool_stats_route() -> dict[str, str]: 99 | 
self._clear_tool_stats() 100 | return {"status": "cleared"} 101 | 102 | @self._app.route("/get_token_count_estimator_name", methods=["GET"]) 103 | def get_token_count_estimator_name() -> dict[str, str]: 104 | estimator_name = self._tool_usage_stats.token_estimator_name if self._tool_usage_stats else "unknown" 105 | return {"token_count_estimator_name": estimator_name} 106 | 107 | @self._app.route("/shutdown", methods=["PUT"]) 108 | def shutdown() -> dict[str, str]: 109 | self._shutdown() 110 | return {"status": "shutting down"} 111 | 112 | def _get_log_messages(self, request_log: RequestLog) -> ResponseLog: 113 | all_messages = self._memory_log_handler.get_log_messages() 114 | requested_messages = all_messages[request_log.start_idx :] if request_log.start_idx <= len(all_messages) else [] 115 | project = self._agent.get_active_project() 116 | project_name = project.project_name if project else None 117 | return ResponseLog(messages=requested_messages, max_idx=len(all_messages) - 1, active_project=project_name) 118 | 119 | def _get_tool_names(self) -> ResponseToolNames: 120 | return ResponseToolNames(tool_names=self._tool_names) 121 | 122 | def _get_tool_stats(self) -> ResponseToolStats: 123 | if self._tool_usage_stats is not None: 124 | return ResponseToolStats(stats=self._tool_usage_stats.get_tool_stats_dict()) 125 | else: 126 | return ResponseToolStats(stats={}) 127 | 128 | def _clear_tool_stats(self) -> None: 129 | if self._tool_usage_stats is not None: 130 | self._tool_usage_stats.clear() 131 | 132 | def _shutdown(self) -> None: 133 | log.info("Shutting down Serena") 134 | if self._shutdown_callback: 135 | self._shutdown_callback() 136 | else: 137 | # noinspection PyProtectedMember 138 | # noinspection PyUnresolvedReferences 139 | os._exit(0) 140 | 141 | @staticmethod 142 | def _find_first_free_port(start_port: int) -> int: 143 | port = start_port 144 | while port <= 65535: 145 | try: 146 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: 147 | sock.bind(("0.0.0.0", port)) 148 | return port 149 | except OSError: 150 | port += 1 151 | 152 | raise RuntimeError(f"No free ports found starting from {start_port}") 153 | 154 | def run(self, host: str = "0.0.0.0", port: int = 0x5EDA) -> int: 155 | """ 156 | Runs the dashboard on the given host and port and returns the port number. 
157 | """ 158 | # patch flask.cli.show_server to avoid printing the server info 159 | from flask import cli 160 | 161 | cli.show_server_banner = lambda *args, **kwargs: None 162 | 163 | self._app.run(host=host, port=port, debug=False, use_reloader=False, threaded=True) 164 | return port 165 | 166 | def run_in_thread(self) -> tuple[threading.Thread, int]: 167 | port = self._find_first_free_port(0x5EDA) 168 | thread = threading.Thread(target=lambda: self.run(port=port), daemon=True) 169 | thread.start() 170 | return thread, port 171 | ``` -------------------------------------------------------------------------------- /src/solidlsp/language_servers/gopls.py: -------------------------------------------------------------------------------- ```python 1 | import logging 2 | import os 3 | import pathlib 4 | import subprocess 5 | import threading 6 | 7 | from overrides import override 8 | 9 | from solidlsp.ls import SolidLanguageServer 10 | from solidlsp.ls_config import LanguageServerConfig 11 | from solidlsp.ls_logger import LanguageServerLogger 12 | from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams 13 | from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo 14 | from solidlsp.settings import SolidLSPSettings 15 | 16 | 17 | class Gopls(SolidLanguageServer): 18 | """ 19 | Provides Go specific instantiation of the LanguageServer class using gopls. 20 | """ 21 | 22 | @override 23 | def is_ignored_dirname(self, dirname: str) -> bool: 24 | # For Go projects, we should ignore: 25 | # - vendor: third-party dependencies vendored into the project 26 | # - node_modules: if the project has JavaScript components 27 | # - dist/build: common output directories 28 | return super().is_ignored_dirname(dirname) or dirname in ["vendor", "node_modules", "dist", "build"] 29 | 30 | @staticmethod 31 | def _get_go_version(): 32 | """Get the installed Go version or None if not found.""" 33 | try: 34 | result = subprocess.run(["go", "version"], capture_output=True, text=True, check=False) 35 | if result.returncode == 0: 36 | return result.stdout.strip() 37 | except FileNotFoundError: 38 | return None 39 | return None 40 | 41 | @staticmethod 42 | def _get_gopls_version(): 43 | """Get the installed gopls version or None if not found.""" 44 | try: 45 | result = subprocess.run(["gopls", "version"], capture_output=True, text=True, check=False) 46 | if result.returncode == 0: 47 | return result.stdout.strip() 48 | except FileNotFoundError: 49 | return None 50 | return None 51 | 52 | @staticmethod 53 | def _setup_runtime_dependency(): 54 | """ 55 | Check if required Go runtime dependencies are available. 56 | Raises RuntimeError with helpful message if dependencies are missing. 57 | """ 58 | go_version = Gopls._get_go_version() 59 | if not go_version: 60 | raise RuntimeError( 61 | "Go is not installed. Please install Go from https://golang.org/doc/install and make sure it is added to your PATH." 62 | ) 63 | 64 | gopls_version = Gopls._get_gopls_version() 65 | if not gopls_version: 66 | raise RuntimeError( 67 | "Found a Go version but gopls is not installed.\n" 68 | "Please install gopls as described in https://pkg.go.dev/golang.org/x/tools/gopls#section-readme\n\n" 69 | "After installation, make sure it is added to your PATH (it might be installed in a different location than Go)." 
70 | ) 71 | 72 | return True 73 | 74 | def __init__( 75 | self, config: LanguageServerConfig, logger: LanguageServerLogger, repository_root_path: str, solidlsp_settings: SolidLSPSettings 76 | ): 77 | self._setup_runtime_dependency() 78 | 79 | super().__init__( 80 | config, 81 | logger, 82 | repository_root_path, 83 | ProcessLaunchInfo(cmd="gopls", cwd=repository_root_path), 84 | "go", 85 | solidlsp_settings, 86 | ) 87 | self.server_ready = threading.Event() 88 | self.request_id = 0 89 | 90 | @staticmethod 91 | def _get_initialize_params(repository_absolute_path: str) -> InitializeParams: 92 | """ 93 | Returns the initialize params for the Go Language Server. 94 | """ 95 | root_uri = pathlib.Path(repository_absolute_path).as_uri() 96 | initialize_params = { 97 | "locale": "en", 98 | "capabilities": { 99 | "textDocument": { 100 | "synchronization": {"didSave": True, "dynamicRegistration": True}, 101 | "definition": {"dynamicRegistration": True}, 102 | "documentSymbol": { 103 | "dynamicRegistration": True, 104 | "hierarchicalDocumentSymbolSupport": True, 105 | "symbolKind": {"valueSet": list(range(1, 27))}, 106 | }, 107 | }, 108 | "workspace": {"workspaceFolders": True, "didChangeConfiguration": {"dynamicRegistration": True}}, 109 | }, 110 | "processId": os.getpid(), 111 | "rootPath": repository_absolute_path, 112 | "rootUri": root_uri, 113 | "workspaceFolders": [ 114 | { 115 | "uri": root_uri, 116 | "name": os.path.basename(repository_absolute_path), 117 | } 118 | ], 119 | } 120 | return initialize_params 121 | 122 | def _start_server(self): 123 | """Start gopls server process""" 124 | 125 | def register_capability_handler(params): 126 | return 127 | 128 | def window_log_message(msg): 129 | self.logger.log(f"LSP: window/logMessage: {msg}", logging.INFO) 130 | 131 | def do_nothing(params): 132 | return 133 | 134 | self.server.on_request("client/registerCapability", register_capability_handler) 135 | self.server.on_notification("window/logMessage", window_log_message) 136 | self.server.on_notification("$/progress", do_nothing) 137 | self.server.on_notification("textDocument/publishDiagnostics", do_nothing) 138 | 139 | self.logger.log("Starting gopls server process", logging.INFO) 140 | self.server.start() 141 | initialize_params = self._get_initialize_params(self.repository_root_path) 142 | 143 | self.logger.log( 144 | "Sending initialize request from LSP client to LSP server and awaiting response", 145 | logging.INFO, 146 | ) 147 | init_response = self.server.send.initialize(initialize_params) 148 | 149 | # Verify server capabilities 150 | assert "textDocumentSync" in init_response["capabilities"] 151 | assert "completionProvider" in init_response["capabilities"] 152 | assert "definitionProvider" in init_response["capabilities"] 153 | 154 | self.server.notify.initialized({}) 155 | self.completions_available.set() 156 | 157 | # gopls server is typically ready immediately after initialization 158 | self.server_ready.set() 159 | self.server_ready.wait() 160 | ``` -------------------------------------------------------------------------------- /src/serena/agno.py: -------------------------------------------------------------------------------- ```python 1 | import argparse 2 | import logging 3 | import os 4 | import threading 5 | from pathlib import Path 6 | from typing import Any 7 | 8 | from agno.agent import Agent 9 | from agno.memory import AgentMemory 10 | from agno.models.base import Model 11 | from agno.storage.sqlite import SqliteStorage 12 | from agno.tools.function import Function 13 | 
from agno.tools.toolkit import Toolkit 14 | from dotenv import load_dotenv 15 | from sensai.util.logging import LogTime 16 | 17 | from serena.agent import SerenaAgent, Tool 18 | from serena.config.context_mode import SerenaAgentContext 19 | from serena.constants import REPO_ROOT 20 | from serena.util.exception import show_fatal_exception_safe 21 | 22 | log = logging.getLogger(__name__) 23 | 24 | 25 | class SerenaAgnoToolkit(Toolkit): 26 | def __init__(self, serena_agent: SerenaAgent): 27 | super().__init__("Serena") 28 | for tool in serena_agent.get_exposed_tool_instances(): 29 | self.functions[tool.get_name_from_cls()] = self._create_agno_function(tool) 30 | log.info("Agno agent functions: %s", list(self.functions.keys())) 31 | 32 | @staticmethod 33 | def _create_agno_function(tool: Tool) -> Function: 34 | def entrypoint(**kwargs: Any) -> str: 35 | if "kwargs" in kwargs: 36 | # Agno sometimes passes a kwargs argument explicitly, so we merge it 37 | kwargs.update(kwargs["kwargs"]) 38 | del kwargs["kwargs"] 39 | log.info(f"Calling tool {tool}") 40 | return tool.apply_ex(log_call=True, catch_exceptions=True, **kwargs) 41 | 42 | function = Function.from_callable(tool.get_apply_fn()) 43 | function.name = tool.get_name_from_cls() 44 | function.entrypoint = entrypoint 45 | function.skip_entrypoint_processing = True 46 | return function 47 | 48 | 49 | class SerenaAgnoAgentProvider: 50 | _agent: Agent | None = None 51 | _lock = threading.Lock() 52 | 53 | @classmethod 54 | def get_agent(cls, model: Model) -> Agent: 55 | """ 56 | Returns the singleton instance of the Serena agent or creates it with the given parameters if it doesn't exist. 57 | 58 | NOTE: This is very ugly with poor separation of concerns, but the way in which the Agno UI works (reloading the 59 | module that defines the `app` variable) essentially forces us to do something like this. 
60 | 61 | :param model: the large language model to use for the agent 62 | :return: the agent instance 63 | """ 64 | with cls._lock: 65 | if cls._agent is not None: 66 | return cls._agent 67 | 68 | # change to Serena root 69 | os.chdir(REPO_ROOT) 70 | 71 | load_dotenv() 72 | 73 | parser = argparse.ArgumentParser(description="Serena coding assistant") 74 | 75 | # Create a mutually exclusive group 76 | group = parser.add_mutually_exclusive_group() 77 | 78 | # Add arguments to the group, both pointing to the same destination 79 | group.add_argument( 80 | "--project-file", 81 | required=False, 82 | help="Path to the project (or project.yml file).", 83 | ) 84 | group.add_argument( 85 | "--project", 86 | required=False, 87 | help="Path to the project (or project.yml file).", 88 | ) 89 | args = parser.parse_args() 90 | 91 | args_project_file = args.project or args.project_file 92 | 93 | if args_project_file: 94 | project_file = Path(args_project_file).resolve() 95 | # If project file path is relative, make it absolute by joining with project root 96 | if not project_file.is_absolute(): 97 | # Get the project root directory (parent of scripts directory) 98 | project_root = Path(REPO_ROOT) 99 | project_file = project_root / args_project_file 100 | 101 | # Ensure the path is normalized and absolute 102 | project_file = str(project_file.resolve()) 103 | else: 104 | project_file = None 105 | 106 | with LogTime("Loading Serena agent"): 107 | try: 108 | serena_agent = SerenaAgent(project_file, context=SerenaAgentContext.load("agent")) 109 | except Exception as e: 110 | show_fatal_exception_safe(e) 111 | raise 112 | 113 | # Even though we don't want to keep history between sessions, 114 | # for agno-ui to work as a conversation, we use a persistent storage on disk. 115 | # This storage should be deleted between sessions. 116 | # Note that this might collide with custom options for the agent, like adding vector-search based tools. 117 | # See here for an explanation: https://www.reddit.com/r/agno/comments/1jk6qea/regarding_the_built_in_memory/ 118 | sql_db_path = (Path("temp") / "agno_agent_storage.db").absolute() 119 | sql_db_path.parent.mkdir(exist_ok=True) 120 | # delete the db file if it exists 121 | log.info(f"Deleting DB from PID {os.getpid()}") 122 | if sql_db_path.exists(): 123 | sql_db_path.unlink() 124 | 125 | agno_agent = Agent( 126 | name="Serena", 127 | model=model, 128 | # See explanation above on why storage is needed 129 | storage=SqliteStorage(table_name="serena_agent_sessions", db_file=str(sql_db_path)), 130 | description="A fully-featured coding assistant", 131 | tools=[SerenaAgnoToolkit(serena_agent)], 132 | # The tool calls will be shown in the UI anyway since whether to show them is configurable per tool 133 | # To see detailed logs, you should use the serena logger (configure it in the project file path) 134 | show_tool_calls=False, 135 | markdown=True, 136 | system_message=serena_agent.create_system_prompt(), 137 | telemetry=False, 138 | memory=AgentMemory(), 139 | add_history_to_messages=True, 140 | num_history_responses=100, # you might want to adjust this (expense vs. 
history awareness) 141 | ) 142 | cls._agent = agno_agent 143 | log.info(f"Agent instantiated: {agno_agent}") 144 | 145 | return agno_agent 146 | ``` -------------------------------------------------------------------------------- /test/solidlsp/bash/test_bash_basic.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Basic integration tests for the bash language server functionality. 3 | 4 | These tests validate the functionality of the language server APIs 5 | like request_document_symbols using the bash test repository. 6 | """ 7 | 8 | import pytest 9 | 10 | from solidlsp import SolidLanguageServer 11 | from solidlsp.ls_config import Language 12 | 13 | 14 | @pytest.mark.bash 15 | class TestBashLanguageServerBasics: 16 | """Test basic functionality of the bash language server.""" 17 | 18 | @pytest.mark.parametrize("language_server", [Language.BASH], indirect=True) 19 | def test_bash_language_server_initialization(self, language_server: SolidLanguageServer) -> None: 20 | """Test that bash language server can be initialized successfully.""" 21 | assert language_server is not None 22 | assert language_server.language == Language.BASH 23 | 24 | @pytest.mark.parametrize("language_server", [Language.BASH], indirect=True) 25 | def test_bash_request_document_symbols(self, language_server: SolidLanguageServer) -> None: 26 | """Test request_document_symbols for bash files.""" 27 | # Test getting symbols from main.sh 28 | all_symbols, _root_symbols = language_server.request_document_symbols("main.sh", include_body=False) 29 | 30 | # Extract function symbols (LSP Symbol Kind 12) 31 | function_symbols = [symbol for symbol in all_symbols if symbol.get("kind") == 12] 32 | function_names = [symbol["name"] for symbol in function_symbols] 33 | 34 | # Should detect all 3 functions from main.sh 35 | assert "greet_user" in function_names, "Should find greet_user function" 36 | assert "process_items" in function_names, "Should find process_items function" 37 | assert "main" in function_names, "Should find main function" 38 | assert len(function_symbols) >= 3, f"Should find at least 3 functions, found {len(function_symbols)}" 39 | 40 | @pytest.mark.parametrize("language_server", [Language.BASH], indirect=True) 41 | def test_bash_request_document_symbols_with_body(self, language_server: SolidLanguageServer) -> None: 42 | """Test request_document_symbols with body extraction.""" 43 | # Test with include_body=True 44 | all_symbols, _root_symbols = language_server.request_document_symbols("main.sh", include_body=True) 45 | 46 | function_symbols = [symbol for symbol in all_symbols if symbol.get("kind") == 12] 47 | 48 | # Find greet_user function and check it has body 49 | greet_user_symbol = next((sym for sym in function_symbols if sym["name"] == "greet_user"), None) 50 | assert greet_user_symbol is not None, "Should find greet_user function" 51 | 52 | if "body" in greet_user_symbol: 53 | body = greet_user_symbol["body"] 54 | assert "function greet_user()" in body, "Function body should contain function definition" 55 | assert "case" in body.lower(), "Function body should contain case statement" 56 | 57 | @pytest.mark.parametrize("language_server", [Language.BASH], indirect=True) 58 | def test_bash_utils_functions(self, language_server: SolidLanguageServer) -> None: 59 | """Test function detection in utils.sh file.""" 60 | # Test with utils.sh as well 61 | utils_all_symbols, _utils_root_symbols = language_server.request_document_symbols("utils.sh", 
include_body=False) 62 | 63 | utils_function_symbols = [symbol for symbol in utils_all_symbols if symbol.get("kind") == 12] 64 | utils_function_names = [symbol["name"] for symbol in utils_function_symbols] 65 | 66 | # Should detect functions from utils.sh 67 | expected_utils_functions = [ 68 | "to_uppercase", 69 | "to_lowercase", 70 | "trim_whitespace", 71 | "backup_file", 72 | "contains_element", 73 | "log_message", 74 | "is_valid_email", 75 | "is_number", 76 | ] 77 | 78 | for func_name in expected_utils_functions: 79 | assert func_name in utils_function_names, f"Should find {func_name} function in utils.sh" 80 | 81 | assert len(utils_function_symbols) >= 8, f"Should find at least 8 functions in utils.sh, found {len(utils_function_symbols)}" 82 | 83 | @pytest.mark.parametrize("language_server", [Language.BASH], indirect=True) 84 | def test_bash_function_syntax_patterns(self, language_server: SolidLanguageServer) -> None: 85 | """Test that LSP detects different bash function syntax patterns correctly.""" 86 | # Test main.sh (has both 'function' keyword and traditional syntax) 87 | main_all_symbols, _main_root_symbols = language_server.request_document_symbols("main.sh", include_body=False) 88 | main_functions = [symbol for symbol in main_all_symbols if symbol.get("kind") == 12] 89 | main_function_names = [func["name"] for func in main_functions] 90 | 91 | # Test utils.sh (all use 'function' keyword) 92 | utils_all_symbols, _utils_root_symbols = language_server.request_document_symbols("utils.sh", include_body=False) 93 | utils_functions = [symbol for symbol in utils_all_symbols if symbol.get("kind") == 12] 94 | utils_function_names = [func["name"] for func in utils_functions] 95 | 96 | # Verify LSP detects both syntax patterns 97 | # main() uses traditional syntax: main() { 98 | assert "main" in main_function_names, "LSP should detect traditional function syntax" 99 | 100 | # Functions with 'function' keyword: function name() { 101 | assert "greet_user" in main_function_names, "LSP should detect function keyword syntax" 102 | assert "process_items" in main_function_names, "LSP should detect function keyword syntax" 103 | 104 | # Verify all expected utils functions are detected by LSP 105 | expected_utils = [ 106 | "to_uppercase", 107 | "to_lowercase", 108 | "trim_whitespace", 109 | "backup_file", 110 | "contains_element", 111 | "log_message", 112 | "is_valid_email", 113 | "is_number", 114 | ] 115 | 116 | for expected_func in expected_utils: 117 | assert expected_func in utils_function_names, f"LSP should detect {expected_func} function" 118 | 119 | # Verify total counts match expectations 120 | assert len(main_functions) >= 3, f"Should find at least 3 functions in main.sh, found {len(main_functions)}" 121 | assert len(utils_functions) >= 8, f"Should find at least 8 functions in utils.sh, found {len(utils_functions)}" 122 | ``` -------------------------------------------------------------------------------- /test/solidlsp/elixir/conftest.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Elixir-specific test configuration and fixtures. 3 | """ 4 | 5 | import os 6 | import subprocess 7 | import time 8 | from pathlib import Path 9 | 10 | import pytest 11 | 12 | 13 | def ensure_elixir_test_repo_compiled(repo_path: str) -> None: 14 | """Ensure the Elixir test repository dependencies are installed and project is compiled. 
15 | 16 | Next LS requires the project to be fully compiled and indexed before providing 17 | complete references and symbol resolution. This function: 18 | 1. Installs dependencies via 'mix deps.get' 19 | 2. Compiles the project via 'mix compile' 20 | 21 | This is essential in CI environments where dependencies aren't pre-installed. 22 | 23 | Args: 24 | repo_path: Path to the Elixir project root directory 25 | 26 | """ 27 | # Check if this looks like an Elixir project 28 | mix_file = os.path.join(repo_path, "mix.exs") 29 | if not os.path.exists(mix_file): 30 | return 31 | 32 | # Check if already compiled (optimization for repeated runs) 33 | build_path = os.path.join(repo_path, "_build") 34 | deps_path = os.path.join(repo_path, "deps") 35 | 36 | if os.path.exists(build_path) and os.path.exists(deps_path): 37 | print(f"Elixir test repository already compiled in {repo_path}") 38 | return 39 | 40 | try: 41 | print("Installing dependencies and compiling Elixir test repository for optimal Next LS performance...") 42 | 43 | # First, install dependencies with increased timeout for CI 44 | print("=" * 60) 45 | print("Step 1/2: Installing Elixir dependencies...") 46 | print("=" * 60) 47 | start_time = time.time() 48 | 49 | deps_result = subprocess.run( 50 | ["mix", "deps.get"], 51 | cwd=repo_path, 52 | capture_output=True, 53 | text=True, 54 | timeout=180, 55 | check=False, # 3 minutes for dependency installation (CI can be slow) 56 | ) 57 | 58 | deps_duration = time.time() - start_time 59 | print(f"Dependencies installation completed in {deps_duration:.2f} seconds") 60 | 61 | # Always log the output for transparency 62 | if deps_result.stdout.strip(): 63 | print("Dependencies stdout:") 64 | print("-" * 40) 65 | print(deps_result.stdout) 66 | print("-" * 40) 67 | 68 | if deps_result.stderr.strip(): 69 | print("Dependencies stderr:") 70 | print("-" * 40) 71 | print(deps_result.stderr) 72 | print("-" * 40) 73 | 74 | if deps_result.returncode != 0: 75 | print(f"⚠️ Warning: Dependencies installation failed with exit code {deps_result.returncode}") 76 | # Continue anyway - some projects might not have dependencies 77 | else: 78 | print("✓ Dependencies installed successfully") 79 | 80 | # Then compile the project with increased timeout for CI 81 | print("=" * 60) 82 | print("Step 2/2: Compiling Elixir project...") 83 | print("=" * 60) 84 | start_time = time.time() 85 | 86 | compile_result = subprocess.run( 87 | ["mix", "compile"], 88 | cwd=repo_path, 89 | capture_output=True, 90 | text=True, 91 | timeout=300, 92 | check=False, # 5 minutes for compilation (Credo compilation can be slow in CI) 93 | ) 94 | 95 | compile_duration = time.time() - start_time 96 | print(f"Compilation completed in {compile_duration:.2f} seconds") 97 | 98 | # Always log the output for transparency 99 | if compile_result.stdout.strip(): 100 | print("Compilation stdout:") 101 | print("-" * 40) 102 | print(compile_result.stdout) 103 | print("-" * 40) 104 | 105 | if compile_result.stderr.strip(): 106 | print("Compilation stderr:") 107 | print("-" * 40) 108 | print(compile_result.stderr) 109 | print("-" * 40) 110 | 111 | if compile_result.returncode == 0: 112 | print(f"✓ Elixir test repository compiled successfully in {repo_path}") 113 | else: 114 | print(f"⚠️ Warning: Compilation completed with exit code {compile_result.returncode}") 115 | # Still continue - warnings are often non-fatal 116 | 117 | print("=" * 60) 118 | print(f"Total setup time: {time.time() - (start_time - compile_duration - deps_duration):.2f} seconds") 119 | 
print("=" * 60) 120 | 121 | except subprocess.TimeoutExpired as e: 122 | print("=" * 60) 123 | print(f"❌ TIMEOUT: Elixir setup timed out after {e.timeout} seconds") 124 | print(f"Command: {' '.join(e.cmd)}") 125 | print("This may indicate slow CI environment - Next LS may still work but with reduced functionality") 126 | 127 | # Try to get partial output if available 128 | if hasattr(e, "stdout") and e.stdout: 129 | print("Partial stdout before timeout:") 130 | print("-" * 40) 131 | print(e.stdout) 132 | print("-" * 40) 133 | if hasattr(e, "stderr") and e.stderr: 134 | print("Partial stderr before timeout:") 135 | print("-" * 40) 136 | print(e.stderr) 137 | print("-" * 40) 138 | print("=" * 60) 139 | 140 | except FileNotFoundError: 141 | print("❌ ERROR: 'mix' command not found - Elixir test repository may not be compiled") 142 | print("Please ensure Elixir is installed and available in PATH") 143 | except Exception as e: 144 | print(f"❌ ERROR: Failed to prepare Elixir test repository: {e}") 145 | 146 | 147 | @pytest.fixture(scope="session", autouse=True) 148 | def setup_elixir_test_environment(): 149 | """Automatically prepare Elixir test environment for all Elixir tests. 150 | 151 | This fixture runs once per test session and automatically: 152 | 1. Installs dependencies via 'mix deps.get' 153 | 2. Compiles the Elixir test repository via 'mix compile' 154 | 155 | It uses autouse=True so it runs automatically without needing to be explicitly 156 | requested by tests. This ensures Next LS has a fully prepared project to work with. 157 | 158 | Uses generous timeouts (3-5 minutes) to accommodate slow CI environments. 159 | All output is logged for transparency and debugging. 160 | """ 161 | # Get the test repo path relative to this conftest.py file 162 | test_repo_path = Path(__file__).parent.parent.parent / "resources" / "repos" / "elixir" / "test_repo" 163 | ensure_elixir_test_repo_compiled(str(test_repo_path)) 164 | return str(test_repo_path) 165 | 166 | 167 | @pytest.fixture(scope="session") 168 | def elixir_test_repo_path(setup_elixir_test_environment): 169 | """Get the path to the prepared Elixir test repository. 170 | 171 | This fixture depends on setup_elixir_test_environment to ensure dependencies 172 | are installed and compilation has completed before returning the path. 173 | """ 174 | return setup_elixir_test_environment 175 | ``` -------------------------------------------------------------------------------- /test/solidlsp/erlang/conftest.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Erlang-specific test configuration and fixtures. 3 | """ 4 | 5 | import os 6 | import subprocess 7 | import time 8 | from pathlib import Path 9 | 10 | import pytest 11 | 12 | 13 | def ensure_erlang_test_repo_compiled(repo_path: str) -> None: 14 | """Ensure the Erlang test repository dependencies are installed and project is compiled. 15 | 16 | Erlang LS requires the project to be fully compiled and indexed before providing 17 | complete references and symbol resolution. This function: 18 | 1. Installs dependencies via 'rebar3 deps' 19 | 2. Compiles the project via 'rebar3 compile' 20 | 21 | This is essential in CI environments where dependencies aren't pre-installed. 
22 | 23 | Args: 24 | repo_path: Path to the Erlang project root directory 25 | 26 | """ 27 | # Check if this looks like an Erlang project 28 | rebar_config = os.path.join(repo_path, "rebar.config") 29 | if not os.path.exists(rebar_config): 30 | return 31 | 32 | # Check if already compiled (optimization for repeated runs) 33 | build_path = os.path.join(repo_path, "_build") 34 | deps_path = os.path.join(repo_path, "deps") 35 | 36 | if os.path.exists(build_path) and os.path.exists(deps_path): 37 | print(f"Erlang test repository already compiled in {repo_path}") 38 | return 39 | 40 | try: 41 | print("Installing dependencies and compiling Erlang test repository for optimal Erlang LS performance...") 42 | 43 | # First, install dependencies with increased timeout for CI 44 | print("=" * 60) 45 | print("Step 1/2: Installing Erlang dependencies...") 46 | print("=" * 60) 47 | start_time = time.time() 48 | 49 | deps_result = subprocess.run( 50 | ["rebar3", "deps"], 51 | cwd=repo_path, 52 | capture_output=True, 53 | text=True, 54 | timeout=180, 55 | check=False, # 3 minutes for dependency installation (CI can be slow) 56 | ) 57 | 58 | deps_duration = time.time() - start_time 59 | print(f"Dependencies installation completed in {deps_duration:.2f} seconds") 60 | 61 | # Always log the output for transparency 62 | if deps_result.stdout.strip(): 63 | print("Dependencies stdout:") 64 | print("-" * 40) 65 | print(deps_result.stdout) 66 | print("-" * 40) 67 | 68 | if deps_result.stderr.strip(): 69 | print("Dependencies stderr:") 70 | print("-" * 40) 71 | print(deps_result.stderr) 72 | print("-" * 40) 73 | 74 | if deps_result.returncode != 0: 75 | print(f"⚠️ Warning: Dependencies installation failed with exit code {deps_result.returncode}") 76 | # Continue anyway - some projects might not have dependencies 77 | else: 78 | print("✓ Dependencies installed successfully") 79 | 80 | # Then compile the project with increased timeout for CI 81 | print("=" * 60) 82 | print("Step 2/2: Compiling Erlang project...") 83 | print("=" * 60) 84 | start_time = time.time() 85 | 86 | compile_result = subprocess.run( 87 | ["rebar3", "compile"], 88 | cwd=repo_path, 89 | capture_output=True, 90 | text=True, 91 | timeout=300, 92 | check=False, # 5 minutes for compilation (Dialyzer can be slow in CI) 93 | ) 94 | 95 | compile_duration = time.time() - start_time 96 | print(f"Compilation completed in {compile_duration:.2f} seconds") 97 | 98 | # Always log the output for transparency 99 | if compile_result.stdout.strip(): 100 | print("Compilation stdout:") 101 | print("-" * 40) 102 | print(compile_result.stdout) 103 | print("-" * 40) 104 | 105 | if compile_result.stderr.strip(): 106 | print("Compilation stderr:") 107 | print("-" * 40) 108 | print(compile_result.stderr) 109 | print("-" * 40) 110 | 111 | if compile_result.returncode == 0: 112 | print(f"✓ Erlang test repository compiled successfully in {repo_path}") 113 | else: 114 | print(f"⚠️ Warning: Compilation completed with exit code {compile_result.returncode}") 115 | # Still continue - warnings are often non-fatal 116 | 117 | print("=" * 60) 118 | print(f"Total setup time: {time.time() - (start_time - compile_duration - deps_duration):.2f} seconds") 119 | print("=" * 60) 120 | 121 | except subprocess.TimeoutExpired as e: 122 | print("=" * 60) 123 | print(f"❌ TIMEOUT: Erlang setup timed out after {e.timeout} seconds") 124 | print(f"Command: {' '.join(e.cmd)}") 125 | print("This may indicate slow CI environment - Erlang LS may still work but with reduced functionality") 126 | 127 | # 
Try to get partial output if available 128 | if hasattr(e, "stdout") and e.stdout: 129 | print("Partial stdout before timeout:") 130 | print("-" * 40) 131 | print(e.stdout) 132 | print("-" * 40) 133 | if hasattr(e, "stderr") and e.stderr: 134 | print("Partial stderr before timeout:") 135 | print("-" * 40) 136 | print(e.stderr) 137 | print("-" * 40) 138 | print("=" * 60) 139 | 140 | except FileNotFoundError: 141 | print("❌ ERROR: 'rebar3' command not found - Erlang test repository may not be compiled") 142 | print("Please ensure rebar3 is installed and available in PATH") 143 | except Exception as e: 144 | print(f"❌ ERROR: Failed to prepare Erlang test repository: {e}") 145 | 146 | 147 | @pytest.fixture(scope="session", autouse=True) 148 | def setup_erlang_test_environment(): 149 | """Automatically prepare Erlang test environment for all Erlang tests. 150 | 151 | This fixture runs once per test session and automatically: 152 | 1. Installs dependencies via 'rebar3 deps' 153 | 2. Compiles the Erlang test repository via 'rebar3 compile' 154 | 155 | It uses autouse=True so it runs automatically without needing to be explicitly 156 | requested by tests. This ensures Erlang LS has a fully prepared project to work with. 157 | 158 | Uses generous timeouts (3-5 minutes) to accommodate slow CI environments. 159 | All output is logged for transparency and debugging. 160 | """ 161 | # Get the test repo path relative to this conftest.py file 162 | test_repo_path = Path(__file__).parent.parent.parent / "resources" / "repos" / "erlang" / "test_repo" 163 | ensure_erlang_test_repo_compiled(str(test_repo_path)) 164 | return str(test_repo_path) 165 | 166 | 167 | @pytest.fixture(scope="session") 168 | def erlang_test_repo_path(setup_erlang_test_environment): 169 | """Get the path to the prepared Erlang test repository. 170 | 171 | This fixture depends on setup_erlang_test_environment to ensure dependencies 172 | are installed and compilation has completed before returning the path. 173 | """ 174 | return setup_erlang_test_environment 175 | ``` -------------------------------------------------------------------------------- /.serena/memories/serena_core_concepts_and_architecture.md: -------------------------------------------------------------------------------- ```markdown 1 | # Serena Core Concepts and Architecture 2 | 3 | ## High-Level Architecture 4 | 5 | Serena is built around a dual-layer architecture: 6 | 7 | 1. **SerenaAgent** - The main orchestrator that manages projects, tools, and user interactions 8 | 2. **SolidLanguageServer** - A unified wrapper around Language Server Protocol (LSP) implementations 9 | 10 | ## Core Components 11 | 12 | ### 1. SerenaAgent (`src/serena/agent.py`) 13 | 14 | The central coordinator that: 15 | - Manages active projects and their configurations 16 | - Coordinates between different tools and contexts 17 | - Handles language server lifecycle 18 | - Manages memory persistence 19 | - Provides MCP (Model Context Protocol) server interface 20 | 21 | Key responsibilities: 22 | - **Project Management** - Activating, switching between projects 23 | - **Tool Registry** - Loading and managing available tools based on context/mode 24 | - **Language Server Integration** - Starting/stopping language servers per project 25 | - **Memory Management** - Persistent storage of project knowledge 26 | - **Task Execution** - Coordinating complex multi-step operations 27 | 28 | ### 2. 
SolidLanguageServer (`src/solidlsp/ls.py`) 29 | 30 | A unified abstraction over multiple language servers that provides: 31 | - **Language-agnostic interface** for symbol operations 32 | - **Caching layer** for performance optimization 33 | - **Error handling and recovery** for unreliable language servers 34 | - **Uniform API** regardless of underlying LSP implementation 35 | 36 | Core capabilities: 37 | - Symbol discovery and navigation 38 | - Code completion and hover information 39 | - Find references and definitions 40 | - Document and workspace symbol search 41 | - File watching and change notifications 42 | 43 | ### 3. Tool System (`src/serena/tools/`) 44 | 45 | Modular tool architecture with several categories: 46 | 47 | #### File Tools (`file_tools.py`) 48 | - File system operations (read, write, list directories) 49 | - Text search and pattern matching 50 | - Regex-based replacements 51 | 52 | #### Symbol Tools (`symbol_tools.py`) 53 | - Language-aware symbol finding and navigation 54 | - Symbol body replacement and insertion 55 | - Reference finding across codebase 56 | 57 | #### Memory Tools (`memory_tools.py`) 58 | - Project knowledge persistence 59 | - Memory retrieval and management 60 | - Onboarding information storage 61 | 62 | #### Configuration Tools (`config_tools.py`) 63 | - Project activation and switching 64 | - Mode and context management 65 | - Tool inclusion/exclusion 66 | 67 | ### 4. Configuration System (`src/serena/config/`) 68 | 69 | Multi-layered configuration supporting: 70 | - **Contexts** - Define available tools and their behavior 71 | - **Modes** - Specify operational patterns (interactive, editing, etc.) 72 | - **Projects** - Per-project settings and language server configs 73 | - **Tool Sets** - Grouped tool collections for different use cases 74 | 75 | ## Language Server Integration 76 | 77 | ### Language Support Model 78 | 79 | Each supported language has: 80 | 1. **Language Server Implementation** (`src/solidlsp/language_servers/`) 81 | 2. **Runtime Dependencies** - Managed downloads of language servers 82 | 3. **Test Repository** (`test/resources/repos/<language>/`) 83 | 4. **Test Suite** (`test/solidlsp/<language>/`) 84 | 85 | ### Language Server Lifecycle 86 | 87 | 1. **Discovery** - Find language servers or download them automatically 88 | 2. **Initialization** - Start server process and perform LSP handshake 89 | 3. **Project Setup** - Open workspace and configure language-specific settings 90 | 4. **Operation** - Handle requests/responses with caching and error recovery 91 | 5. 
**Shutdown** - Clean shutdown of server processes 92 | 93 | ### Supported Languages 94 | 95 | Current language support includes: 96 | - **C#** - Microsoft.CodeAnalysis.LanguageServer (.NET 9) 97 | - **Python** - Pyright or Jedi 98 | - **TypeScript/JavaScript** - TypeScript Language Server 99 | - **Rust** - rust-analyzer 100 | - **Go** - gopls 101 | - **Java** - Eclipse JDT Language Server 102 | - **Kotlin** - Kotlin Language Server 103 | - **PHP** - Intelephense 104 | - **Ruby** - Solargraph 105 | - **Clojure** - clojure-lsp 106 | - **Elixir** - ElixirLS 107 | - **Dart** - Dart Language Server 108 | - **C/C++** - clangd 109 | - **Terraform** - terraform-ls 110 | 111 | ## Memory and Knowledge Management 112 | 113 | ### Memory System 114 | - **Markdown-based storage** in `.serena/memories/` directory 115 | - **Contextual retrieval** - memories loaded based on relevance 116 | - **Project-specific** knowledge persistence 117 | - **Onboarding support** - guided setup for new projects 118 | 119 | ### Knowledge Categories 120 | - **Project Structure** - Directory layouts, build systems 121 | - **Architecture Patterns** - How the codebase is organized 122 | - **Development Workflows** - Testing, building, deployment 123 | - **Domain Knowledge** - Business logic and requirements 124 | 125 | ## MCP Server Interface 126 | 127 | Serena exposes its functionality through Model Context Protocol: 128 | - **Tool Discovery** - AI agents can enumerate available tools 129 | - **Context-Aware Operations** - Tools behave based on active project/mode 130 | - **Stateful Sessions** - Maintains project state across interactions 131 | - **Error Handling** - Graceful degradation when tools fail 132 | 133 | ## Error Handling and Resilience 134 | 135 | ### Language Server Reliability 136 | - **Timeout Management** - Configurable timeouts for LSP requests 137 | - **Process Recovery** - Automatic restart of crashed language servers 138 | - **Fallback Behavior** - Graceful degradation when LSP unavailable 139 | - **Caching Strategy** - Reduces impact of server failures 140 | 141 | ### Project Activation Safety 142 | - **Validation** - Verify project structure before activation 143 | - **Error Isolation** - Project failures don't affect other projects 144 | - **Recovery Mechanisms** - Automatic cleanup and retry logic 145 | 146 | ## Performance Considerations 147 | 148 | ### Caching Strategy 149 | - **Symbol Cache** - In-memory caching of expensive symbol operations 150 | - **File System Cache** - Reduced disk I/O for repeated operations 151 | - **Language Server Cache** - Persistent cache across sessions 152 | 153 | ### Resource Management 154 | - **Language Server Pooling** - Reuse servers across projects when possible 155 | - **Memory Management** - Automatic cleanup of unused resources 156 | - **Background Operations** - Async operations don't block user interactions 157 | 158 | ## Extension Points 159 | 160 | ### Adding New Languages 161 | 1. Implement language server class in `src/solidlsp/language_servers/` 162 | 2. Add runtime dependencies configuration 163 | 3. Create test repository and test suite 164 | 4. Update language enumeration and configuration 165 | 166 | ### Adding New Tools 167 | 1. Inherit from `Tool` base class in `tools_base.py` 168 | 2. Implement required methods and parameter validation 169 | 3. Register tool in appropriate tool registry 170 | 4. 
Add to context/mode configurations as needed 171 | 172 | ### Custom Contexts and Modes 173 | - Define new contexts in YAML configuration files 174 | - Specify tool sets and operational patterns 175 | - Configure for specific development workflows ``` -------------------------------------------------------------------------------- /src/solidlsp/language_servers/dart_language_server.py: -------------------------------------------------------------------------------- ```python 1 | import logging 2 | import os 3 | import pathlib 4 | 5 | from solidlsp.ls import SolidLanguageServer 6 | from solidlsp.ls_logger import LanguageServerLogger 7 | from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo 8 | from solidlsp.settings import SolidLSPSettings 9 | 10 | from .common import RuntimeDependency, RuntimeDependencyCollection 11 | 12 | 13 | class DartLanguageServer(SolidLanguageServer): 14 | """ 15 | Provides Dart specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Dart. 16 | """ 17 | 18 | def __init__(self, config, logger, repository_root_path, solidlsp_settings: SolidLSPSettings): 19 | """ 20 | Creates a DartServer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead. 21 | """ 22 | executable_path = self._setup_runtime_dependencies(logger, solidlsp_settings) 23 | super().__init__( 24 | config, 25 | logger, 26 | repository_root_path, 27 | ProcessLaunchInfo(cmd=executable_path, cwd=repository_root_path), 28 | "dart", 29 | solidlsp_settings, 30 | ) 31 | 32 | @classmethod 33 | def _setup_runtime_dependencies(cls, logger: "LanguageServerLogger", solidlsp_settings: SolidLSPSettings) -> str: 34 | deps = RuntimeDependencyCollection( 35 | [ 36 | RuntimeDependency( 37 | id="DartLanguageServer", 38 | description="Dart Language Server for Linux (x64)", 39 | url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-linux-x64-release.zip", 40 | platform_id="linux-x64", 41 | archive_type="zip", 42 | binary_name="dart-sdk/bin/dart", 43 | ), 44 | RuntimeDependency( 45 | id="DartLanguageServer", 46 | description="Dart Language Server for Windows (x64)", 47 | url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-windows-x64-release.zip", 48 | platform_id="win-x64", 49 | archive_type="zip", 50 | binary_name="dart-sdk/bin/dart.exe", 51 | ), 52 | RuntimeDependency( 53 | id="DartLanguageServer", 54 | description="Dart Language Server for Windows (arm64)", 55 | url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-windows-arm64-release.zip", 56 | platform_id="win-arm64", 57 | archive_type="zip", 58 | binary_name="dart-sdk/bin/dart.exe", 59 | ), 60 | RuntimeDependency( 61 | id="DartLanguageServer", 62 | description="Dart Language Server for macOS (x64)", 63 | url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-macos-x64-release.zip", 64 | platform_id="osx-x64", 65 | archive_type="zip", 66 | binary_name="dart-sdk/bin/dart", 67 | ), 68 | RuntimeDependency( 69 | id="DartLanguageServer", 70 | description="Dart Language Server for macOS (arm64)", 71 | url="https://storage.googleapis.com/dart-archive/channels/stable/release/3.7.1/sdk/dartsdk-macos-arm64-release.zip", 72 | platform_id="osx-arm64", 73 | archive_type="zip", 74 | binary_name="dart-sdk/bin/dart", 75 | ), 76 | ] 77 | ) 78 | 79 | dart_ls_dir = cls.ls_resources_dir(solidlsp_settings) 80 | dart_executable_path = 
deps.binary_path(dart_ls_dir) 81 | 82 | if not os.path.exists(dart_executable_path): 83 | deps.install(logger, dart_ls_dir) 84 | 85 | assert os.path.exists(dart_executable_path) 86 | os.chmod(dart_executable_path, 0o755) 87 | 88 | return f"{dart_executable_path} language-server --client-id multilspy.dart --client-version 1.2" 89 | 90 | @staticmethod 91 | def _get_initialize_params(repository_absolute_path: str): 92 | """ 93 | Returns the initialize params for the Dart Language Server. 94 | """ 95 | root_uri = pathlib.Path(repository_absolute_path).as_uri() 96 | initialize_params = { 97 | "capabilities": {}, 98 | "initializationOptions": { 99 | "onlyAnalyzeProjectsWithOpenFiles": False, 100 | "closingLabels": False, 101 | "outline": False, 102 | "flutterOutline": False, 103 | "allowOpenUri": False, 104 | }, 105 | "trace": "verbose", 106 | "processId": os.getpid(), 107 | "rootPath": repository_absolute_path, 108 | "rootUri": pathlib.Path(repository_absolute_path).as_uri(), 109 | "workspaceFolders": [ 110 | { 111 | "uri": root_uri, 112 | "name": os.path.basename(repository_absolute_path), 113 | } 114 | ], 115 | } 116 | 117 | return initialize_params 118 | 119 | def _start_server(self): 120 | """ 121 | Start the language server and yield when the server is ready. 122 | """ 123 | 124 | def execute_client_command_handler(params): 125 | return [] 126 | 127 | def do_nothing(params): 128 | return 129 | 130 | def check_experimental_status(params): 131 | pass 132 | 133 | def window_log_message(msg): 134 | self.logger.log(f"LSP: window/logMessage: {msg}", logging.INFO) 135 | 136 | self.server.on_request("client/registerCapability", do_nothing) 137 | self.server.on_notification("language/status", do_nothing) 138 | self.server.on_notification("window/logMessage", window_log_message) 139 | self.server.on_request("workspace/executeClientCommand", execute_client_command_handler) 140 | self.server.on_notification("$/progress", do_nothing) 141 | self.server.on_notification("textDocument/publishDiagnostics", do_nothing) 142 | self.server.on_notification("language/actionableNotification", do_nothing) 143 | self.server.on_notification("experimental/serverStatus", check_experimental_status) 144 | 145 | self.logger.log("Starting dart-language-server server process", logging.INFO) 146 | self.server.start() 147 | initialize_params = self._get_initialize_params(self.repository_root_path) 148 | self.logger.log( 149 | "Sending initialize request to dart-language-server", 150 | logging.DEBUG, 151 | ) 152 | init_response = self.server.send_request("initialize", initialize_params) 153 | self.logger.log( 154 | f"Received initialize response from dart-language-server: {init_response}", 155 | logging.INFO, 156 | ) 157 | 158 | self.server.notify.initialized({}) 159 | ``` -------------------------------------------------------------------------------- /src/serena/tools/jetbrains_plugin_client.py: -------------------------------------------------------------------------------- ```python 1 | """ 2 | Client for the Serena JetBrains Plugin 3 | """ 4 | 5 | import json 6 | import logging 7 | from pathlib import Path 8 | from typing import Any, Optional, Self, TypeVar 9 | 10 | import requests 11 | from requests import Response 12 | from sensai.util.string import ToStringMixin 13 | 14 | from serena.project import Project 15 | 16 | T = TypeVar("T") 17 | log = logging.getLogger(__name__) 18 | 19 | 20 | class SerenaClientError(Exception): 21 | """Base exception for Serena client errors.""" 22 | 23 | 24 | class 
ConnectionError(SerenaClientError): 25 | """Raised when connection to the service fails.""" 26 | 27 | 28 | class APIError(SerenaClientError): 29 | """Raised when the API returns an error response.""" 30 | 31 | 32 | class ServerNotFoundError(Exception): 33 | """Raised when the plugin's service is not found.""" 34 | 35 | 36 | class JetBrainsPluginClient(ToStringMixin): 37 | """ 38 | Python client for the Serena Backend Service. 39 | 40 | Provides simple methods to interact with all available endpoints. 41 | """ 42 | 43 | BASE_PORT = 0x5EA2 44 | last_port: int | None = None 45 | 46 | def __init__(self, port: int, timeout: int = 30): 47 | self.base_url = f"http://127.0.0.1:{port}" 48 | self.timeout = timeout 49 | self.session = requests.Session() 50 | self.session.headers.update({"Content-Type": "application/json", "Accept": "application/json"}) 51 | 52 | def _tostring_includes(self) -> list[str]: 53 | return ["base_url", "timeout"] 54 | 55 | @classmethod 56 | def from_project(cls, project: Project) -> Self: 57 | resolved_path = Path(project.project_root).resolve() 58 | 59 | if cls.last_port is not None: 60 | client = JetBrainsPluginClient(cls.last_port) 61 | if client.matches(resolved_path): 62 | return client 63 | 64 | for port in range(cls.BASE_PORT, cls.BASE_PORT + 20): 65 | client = JetBrainsPluginClient(port) 66 | if client.matches(resolved_path): 67 | log.info("Found JetBrains IDE service at port %d for project %s", port, resolved_path) 68 | cls.last_port = port 69 | return client 70 | 71 | raise ServerNotFoundError("Found no Serena service in a JetBrains IDE instance for the project at " + str(resolved_path)) 72 | 73 | def matches(self, resolved_path: Path) -> bool: 74 | try: 75 | return Path(self.project_root()).resolve() == resolved_path 76 | except ConnectionError: 77 | return False 78 | 79 | def _make_request(self, method: str, endpoint: str, data: Optional[dict] = None) -> dict[str, Any]: 80 | url = f"{self.base_url}{endpoint}" 81 | 82 | response: Response | None = None 83 | try: 84 | if method.upper() == "GET": 85 | response = self.session.get(url, timeout=self.timeout) 86 | elif method.upper() == "POST": 87 | json_data = json.dumps(data) if data else None 88 | response = self.session.post(url, data=json_data, timeout=self.timeout) 89 | else: 90 | raise ValueError(f"Unsupported HTTP method: {method}") 91 | 92 | response.raise_for_status() 93 | 94 | # Try to parse JSON response 95 | try: 96 | return self._pythonify_response(response.json()) 97 | except json.JSONDecodeError: 98 | # If response is not JSON, return raw text 99 | return {"response": response.text} 100 | 101 | except requests.exceptions.ConnectionError as e: 102 | raise ConnectionError(f"Failed to connect to Serena service at {url}: {e}") 103 | except requests.exceptions.Timeout as e: 104 | raise ConnectionError(f"Request to {url} timed out: {e}") 105 | except requests.exceptions.HTTPError as e: 106 | if response is not None: 107 | raise APIError(f"API request failed with status {response.status_code}: {response.text}") 108 | raise APIError(f"API request failed with HTTP error: {e}") 109 | except requests.exceptions.RequestException as e: 110 | raise SerenaClientError(f"Request failed: {e}") 111 | 112 | @staticmethod 113 | def _pythonify_response(response: T) -> T: 114 | """ 115 | Converts dictionary keys from camelCase to snake_case recursively. 
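        Illustrative example (payload is an assumption, not a recorded service response):
        ``{"projectRoot": "/p", "symbolKind": 5}`` becomes ``{"project_root": "/p", "symbol_kind": 5}``;
        nested dictionaries and lists are converted recursively.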
116 | 117 | :response: the response in which to convert keys (dictionary or list) 118 | """ 119 | to_snake_case = lambda s: "".join(["_" + c.lower() if c.isupper() else c for c in s]) 120 | 121 | def convert(x): # type: ignore 122 | if isinstance(x, dict): 123 | return {to_snake_case(k): convert(v) for k, v in x.items()} 124 | elif isinstance(x, list): 125 | return [convert(item) for item in x] 126 | else: 127 | return x 128 | 129 | return convert(response) 130 | 131 | def project_root(self) -> str: 132 | response = self._make_request("GET", "/status") 133 | return response["project_root"] 134 | 135 | def find_symbol( 136 | self, name_path: str, relative_path: str | None = None, include_body: bool = False, depth: int = 0, include_location: bool = False 137 | ) -> dict[str, Any]: 138 | """ 139 | Find symbols by name. 140 | 141 | :param name_path: the name path to match 142 | :param relative_path: the relative path to which to restrict the search 143 | :param include_body: whether to include symbol body content 144 | :param depth: depth of children to include (0 = no children) 145 | 146 | :return: Dictionary containing 'symbols' list with matching symbols 147 | """ 148 | request_data = { 149 | "namePath": name_path, 150 | "relativePath": relative_path, 151 | "includeBody": include_body, 152 | "depth": depth, 153 | "includeLocation": include_location, 154 | } 155 | return self._make_request("POST", "/findSymbol", request_data) 156 | 157 | def find_references(self, name_path: str, relative_path: str) -> dict[str, Any]: 158 | """ 159 | Find references to a symbol. 160 | 161 | :param name_path: the name path of the symbol 162 | :param relative_path: the relative path 163 | :return: dictionary containing 'symbols' list with symbol references 164 | """ 165 | request_data = {"namePath": name_path, "relativePath": relative_path} 166 | return self._make_request("POST", "/findReferences", request_data) 167 | 168 | def get_symbols_overview(self, relative_path: str) -> dict[str, Any]: 169 | """ 170 | :param relative_path: the relative path to a source file 171 | """ 172 | request_data = {"relativePath": relative_path} 173 | return self._make_request("POST", "/getSymbolsOverview", request_data) 174 | 175 | def is_service_available(self) -> bool: 176 | try: 177 | self.project_root() 178 | return True 179 | except (ConnectionError, APIError): 180 | return False 181 | 182 | def close(self) -> None: 183 | self.session.close() 184 | 185 | def __enter__(self) -> Self: 186 | return self 187 | 188 | def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore 189 | self.close() 190 | ``` -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- ```toml 1 | [build-system] 2 | build-backend = "hatchling.build" 3 | requires = ["hatchling"] 4 | 5 | [project] 6 | name = "serena-agent" 7 | version = "0.1.4" 8 | description = "" 9 | authors = [{ name = "Oraios AI", email = "[email protected]" }] 10 | readme = "README.md" 11 | requires-python = ">=3.11, <3.12" 12 | classifiers = [ 13 | "License :: OSI Approved :: MIT License", 14 | "Programming Language :: Python :: 3.11", 15 | ] 16 | dependencies = [ 17 | "requests>=2.32.3,<3", 18 | "pyright>=1.1.396,<2", 19 | "overrides>=7.7.0,<8", 20 | "python-dotenv>=1.0.0, <2", 21 | "mcp==1.12.3", 22 | "flask>=3.0.0", 23 | "sensai-utils>=1.5.0", 24 | "pydantic>=2.10.6", 25 | "types-pyyaml>=6.0.12.20241230", 26 | "pyyaml>=6.0.2", 27 | "ruamel.yaml>=0.18.0", 28 | 
"jinja2>=3.1.6", 29 | "dotenv>=0.9.9", 30 | "pathspec>=0.12.1", 31 | "psutil>=7.0.0", 32 | "docstring_parser>=0.16", 33 | "joblib>=1.5.1", 34 | "tqdm>=4.67.1", 35 | "tiktoken>=0.9.0", 36 | "anthropic>=0.54.0", 37 | ] 38 | 39 | [[tool.uv.index]] 40 | name = "testpypi" 41 | url = "https://test.pypi.org/simple/" 42 | publish-url = "https://test.pypi.org/legacy/" 43 | explicit = true 44 | 45 | [project.scripts] 46 | serena = "serena.cli:top_level" 47 | serena-mcp-server = "serena.cli:start_mcp_server" 48 | index-project = "serena.cli:index_project" # deprecated 49 | 50 | [project.license] 51 | text = "MIT" 52 | 53 | [project.optional-dependencies] 54 | dev = [ 55 | "black[jupyter]>=23.7.0", 56 | "jinja2", 57 | # In version 1.0.4 we get a NoneType error related to some config conversion (yml_analytics is None and should be a list) 58 | "mypy>=1.16.1", 59 | "poethepoet>=0.20.0", 60 | "pytest>=8.0.2", 61 | "pytest-xdist>=3.5.0", 62 | "ruff>=0.0.285", 63 | "toml-sort>=0.24.2", 64 | "types-pyyaml>=6.0.12.20241230", 65 | "syrupy>=4.9.1", 66 | "types-requests>=2.32.4.20241230", 67 | ] 68 | agno = ["agno>=1.2.6", "sqlalchemy>=2.0.40"] 69 | google = ["google-genai>=1.8.0"] 70 | 71 | [project.urls] 72 | Homepage = "https://github.com/oraios/serena" 73 | 74 | [tool.hatch.build.targets.wheel] 75 | packages = ["src/serena", "src/interprompt", "src/solidlsp"] 76 | 77 | [tool.black] 78 | line-length = 140 79 | target-version = ["py311"] 80 | exclude = ''' 81 | /( 82 | src/solidlsp/language_servers/.*/static|src/multilspy 83 | )/ 84 | ''' 85 | 86 | [tool.doc8] 87 | max-line-length = 1000 88 | 89 | [tool.mypy] 90 | allow_redefinition = true 91 | check_untyped_defs = true 92 | disallow_incomplete_defs = true 93 | disallow_untyped_defs = true 94 | ignore_missing_imports = true 95 | no_implicit_optional = true 96 | pretty = true 97 | show_error_codes = true 98 | show_error_context = true 99 | show_traceback = true 100 | strict_equality = true 101 | strict_optional = true 102 | warn_no_return = true 103 | warn_redundant_casts = true 104 | warn_unreachable = true 105 | warn_unused_configs = true 106 | warn_unused_ignores = false 107 | exclude = "^build/|^docs/" 108 | 109 | [tool.poe.env] 110 | PYDEVD_DISABLE_FILE_VALIDATION = "1" 111 | 112 | [tool.poe.tasks] 113 | # Uses PYTEST_MARKERS env var for default markers 114 | # For custom markers, one can either adjust the env var or just use -m option in the command line, 115 | # as the second -m option will override the first one. 
116 | test = "pytest test -vv -m \"${PYTEST_MARKERS:-not java and not rust and not erlang}\"" 117 | _black_check = "black --check src scripts test" 118 | _ruff_check = "ruff check src scripts test" 119 | _black_format = "black src scripts test" 120 | _ruff_format = "ruff check --fix src scripts test" 121 | lint = ["_black_check", "_ruff_check"] 122 | format = ["_ruff_format", "_black_format"] 123 | _mypy = "mypy src/serena" 124 | type-check = ["_mypy"] 125 | 126 | [tool.ruff] 127 | target-version = "py311" 128 | line-length = 140 129 | exclude = ["src/solidlsp/language_servers/**/static", "src/multilspy"] 130 | 131 | [tool.ruff.format] 132 | quote-style = "double" 133 | indent-style = "space" 134 | line-ending = "auto" 135 | skip-magic-trailing-comma = false 136 | docstring-code-format = true 137 | 138 | [tool.ruff.lint] 139 | select = [ 140 | "ASYNC", 141 | "B", 142 | "C4", 143 | "C90", 144 | "COM", 145 | "D", 146 | "DTZ", 147 | "E", 148 | "F", 149 | "FLY", 150 | "G", 151 | "I", 152 | "ISC", 153 | "PIE", 154 | "PLC", 155 | "PLE", 156 | "PLW", 157 | "RET", 158 | "RUF", 159 | "RSE", 160 | "SIM", 161 | "TID", 162 | "UP", 163 | "W", 164 | "YTT", 165 | ] 166 | ignore = [ 167 | "PLC0415", 168 | "RUF002", 169 | "RUF005", 170 | "SIM118", 171 | "SIM108", 172 | "E501", 173 | "E741", 174 | "B008", 175 | "B011", 176 | "B028", 177 | "D100", 178 | "D101", 179 | "D102", 180 | "D103", 181 | "D104", 182 | "D105", 183 | "D107", 184 | "D200", 185 | "D203", 186 | "D213", 187 | "D401", 188 | "D402", 189 | "DTZ005", 190 | "E402", 191 | "E501", 192 | "E701", 193 | "E731", 194 | "C408", 195 | "E203", 196 | "G004", 197 | "RET505", 198 | "D106", 199 | "D205", 200 | "D212", 201 | "PLW2901", 202 | "B027", 203 | "D404", 204 | "D407", 205 | "D408", 206 | "D409", 207 | "D400", 208 | "D415", 209 | "COM812", 210 | "RET503", 211 | "RET504", 212 | "UP038", 213 | "F403", 214 | "F405", 215 | "C401", 216 | "C901", 217 | "ASYNC230", 218 | "ISC003", 219 | "B024", 220 | "B007", 221 | "SIM102", 222 | "W291", 223 | "W293", 224 | "B009", 225 | "SIM103", # forbids multiple returns 226 | "SIM110", # requires use of any(...) instead of for-loop 227 | "G001", # forbids str.format in log statements 228 | "E722", # forbids unspecific except clause 229 | "SIM105", # forbids empty/general except clause 230 | "SIM113", # wants to enforce use of enumerate 231 | "E712", # forbids equality comparison with True/False 232 | "UP007", # forbids some uses of Union 233 | "TID252", # forbids relative imports 234 | "B904", # forces use of raise from other_exception 235 | "RUF012", # forbids mutable attributes as ClassVar 236 | "SIM117", # forbids nested with statements 237 | "C400", # wants to unnecessarily force use of list comprehension 238 | "UP037", # can incorrectly (!) 
convert quoted type to unquoted type, causing an error 239 | "UP045", # imposes T | None instead of Optional[T] 240 | ] 241 | unfixable = ["F841", "F601", "F602", "B018"] 242 | extend-fixable = ["F401", "B905", "W291"] 243 | 244 | [tool.ruff.lint.mccabe] 245 | max-complexity = 20 246 | 247 | [tool.ruff.lint.per-file-ignores] 248 | "tests/**" = ["D103"] 249 | "scripts/**" = ["D103"] 250 | 251 | [tool.pytest.ini_options] 252 | addopts = "--snapshot-patch-pycharm-diff" 253 | markers = [ 254 | "clojure: language server running for Clojure", 255 | "python: language server running for Python", 256 | "go: language server running for Go", 257 | "java: language server running for Java", 258 | "kotlin: language server running for kotlin", 259 | "rust: language server running for Rust", 260 | "typescript: language server running for TypeScript", 261 | "php: language server running for PHP", 262 | "perl: language server running for Perl", 263 | "csharp: language server running for C#", 264 | "elixir: language server running for Elixir", 265 | "elm: language server running for Elm", 266 | "terraform: language server running for Terraform", 267 | "swift: language server running for Swift", 268 | "bash: language server running for Bash", 269 | "r: language server running for R", 270 | "snapshot: snapshot tests for symbolic editing operations", 271 | "ruby: language server running for Ruby (uses ruby-lsp)", 272 | "zig: language server running for Zig", 273 | "lua: language server running for Lua", 274 | "nix: language server running for Nix", 275 | "dart: language server running for Dart", 276 | "erlang: language server running for Erlang", 277 | "al: language server running for AL (Microsoft Dynamics 365 Business Central)", 278 | "markdown: language server running for Markdown", 279 | ] 280 | 281 | [tool.codespell] 282 | # Ref: https://github.com/codespell-project/codespell#using-a-config-file 283 | skip = '.git*,*.svg,*.lock,*.min.*' 284 | check-hidden = true 285 | # ignore-regex = '' 286 | # ignore-words-list = '' ``` -------------------------------------------------------------------------------- /src/solidlsp/language_servers/common.py: -------------------------------------------------------------------------------- ```python 1 | from __future__ import annotations 2 | 3 | import logging 4 | import os 5 | import platform 6 | import subprocess 7 | from collections.abc import Iterable, Mapping, Sequence 8 | from dataclasses import dataclass, replace 9 | from typing import Any, cast 10 | 11 | from solidlsp.ls_logger import LanguageServerLogger 12 | from solidlsp.ls_utils import FileUtils, PlatformUtils 13 | from solidlsp.util.subprocess_util import subprocess_kwargs 14 | 15 | log = logging.getLogger(__name__) 16 | 17 | 18 | @dataclass(kw_only=True) 19 | class RuntimeDependency: 20 | """Represents a runtime dependency for a language server.""" 21 | 22 | id: str 23 | platform_id: str | None = None 24 | url: str | None = None 25 | archive_type: str | None = None 26 | binary_name: str | None = None 27 | command: str | list[str] | None = None 28 | package_name: str | None = None 29 | package_version: str | None = None 30 | extract_path: str | None = None 31 | description: str | None = None 32 | 33 | 34 | class RuntimeDependencyCollection: 35 | """Utility to handle installation of runtime dependencies.""" 36 | 37 | def __init__(self, dependencies: Sequence[RuntimeDependency], overrides: Iterable[Mapping[str, Any]] = ()) -> None: 38 | """Initialize the collection with a list of dependencies and optional 
overrides. 39 | 40 | :param dependencies: List of base RuntimeDependency instances. The combination of 'id' and 'platform_id' must be unique. 41 | :param overrides: List of dictionaries which represent overrides or additions to the base dependencies. 42 | Each entry must contain at least the 'id' key, and optionally 'platform_id' to uniquely identify the dependency to override. 43 | """ 44 | self._id_and_platform_id_to_dep: dict[tuple[str, str | None], RuntimeDependency] = {} 45 | for dep in dependencies: 46 | dep_key = (dep.id, dep.platform_id) 47 | if dep_key in self._id_and_platform_id_to_dep: 48 | raise ValueError(f"Duplicate runtime dependency with id '{dep.id}' and platform_id '{dep.platform_id}':\n{dep}") 49 | self._id_and_platform_id_to_dep[dep_key] = dep 50 | 51 | for dep_values_override in overrides: 52 | override_key = cast(tuple[str, str | None], (dep_values_override["id"], dep_values_override.get("platform_id"))) 53 | base_dep = self._id_and_platform_id_to_dep.get(override_key) 54 | if base_dep is None: 55 | new_runtime_dep = RuntimeDependency(**dep_values_override) 56 | self._id_and_platform_id_to_dep[override_key] = new_runtime_dep 57 | else: 58 | self._id_and_platform_id_to_dep[override_key] = replace(base_dep, **dep_values_override) 59 | 60 | def get_dependencies_for_platform(self, platform_id: str) -> list[RuntimeDependency]: 61 | return [d for d in self._id_and_platform_id_to_dep.values() if d.platform_id in (platform_id, "any", "platform-agnostic", None)] 62 | 63 | def get_dependencies_for_current_platform(self) -> list[RuntimeDependency]: 64 | return self.get_dependencies_for_platform(PlatformUtils.get_platform_id().value) 65 | 66 | def get_single_dep_for_current_platform(self, dependency_id: str | None = None) -> RuntimeDependency: 67 | deps = self.get_dependencies_for_current_platform() 68 | if dependency_id is not None: 69 | deps = [d for d in deps if d.id == dependency_id] 70 | if len(deps) != 1: 71 | raise RuntimeError( 72 | f"Expected exactly one runtime dependency for platform-{PlatformUtils.get_platform_id().value} and {dependency_id=}, found {len(deps)}" 73 | ) 74 | return deps[0] 75 | 76 | def binary_path(self, target_dir: str) -> str: 77 | dep = self.get_single_dep_for_current_platform() 78 | if not dep.binary_name: 79 | return target_dir 80 | return os.path.join(target_dir, dep.binary_name) 81 | 82 | def install(self, logger: LanguageServerLogger, target_dir: str) -> dict[str, str]: 83 | """Install all dependencies for the current platform into *target_dir*. 84 | 85 | Returns a mapping from dependency id to the resolved binary path. 
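        Illustrative example (names and paths are placeholders): a dependency with
        ``id="SomeLanguageServer"`` and ``binary_name="bin/server"`` would map to
        ``{"SomeLanguageServer": "<target_dir>/bin/server"}``.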
86 | """ 87 | os.makedirs(target_dir, exist_ok=True) 88 | results: dict[str, str] = {} 89 | for dep in self.get_dependencies_for_current_platform(): 90 | if dep.url: 91 | self._install_from_url(dep, logger, target_dir) 92 | if dep.command: 93 | self._run_command(dep.command, target_dir) 94 | if dep.binary_name: 95 | results[dep.id] = os.path.join(target_dir, dep.binary_name) 96 | else: 97 | results[dep.id] = target_dir 98 | return results 99 | 100 | @staticmethod 101 | def _run_command(command: str | list[str], cwd: str) -> None: 102 | kwargs = subprocess_kwargs() 103 | if not PlatformUtils.get_platform_id().is_windows(): 104 | import pwd 105 | 106 | kwargs["user"] = pwd.getpwuid(os.getuid()).pw_name 107 | 108 | is_windows = platform.system() == "Windows" 109 | if not isinstance(command, str) and not is_windows: 110 | # Since we are using the shell, we need to convert the command list to a single string 111 | # on Linux/macOS 112 | command = " ".join(command) 113 | 114 | log.info("Running command %s in '%s'", f"'{command}'" if isinstance(command, str) else command, cwd) 115 | 116 | completed_process = subprocess.run( 117 | command, 118 | shell=True, 119 | check=True, 120 | cwd=cwd, 121 | stdout=subprocess.PIPE, 122 | stderr=subprocess.STDOUT, 123 | **kwargs, 124 | ) 125 | if completed_process.returncode != 0: 126 | log.warning("Command '%s' failed with return code %d", command, completed_process.returncode) 127 | log.warning("Command output:\n%s", completed_process.stdout) 128 | else: 129 | log.info( 130 | "Command completed successfully", 131 | ) 132 | 133 | @staticmethod 134 | def _install_from_url(dep: RuntimeDependency, logger: LanguageServerLogger, target_dir: str) -> None: 135 | assert dep.url is not None 136 | if dep.archive_type in ("gz", "binary") and dep.binary_name: 137 | dest = os.path.join(target_dir, dep.binary_name) 138 | FileUtils.download_and_extract_archive(logger, dep.url, dest, dep.archive_type) 139 | else: 140 | FileUtils.download_and_extract_archive(logger, dep.url, target_dir, dep.archive_type or "zip") 141 | 142 | 143 | def quote_windows_path(path: str) -> str: 144 | """ 145 | Quote a path for Windows command execution if needed. 146 | 147 | On Windows, paths need to be quoted for proper command execution. 148 | The function checks if the path is already quoted to avoid double-quoting. 149 | On other platforms, the path is returned unchanged. 
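    Illustrative example (path is an assumption): on Windows, ``C:\tools\server.exe`` is returned as
    ``"C:\tools\server.exe"`` (with surrounding quotes); on Linux/macOS the input is returned as-is.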
150 | 151 | Args: 152 | path: The file path to potentially quote 153 | 154 | Returns: 155 | The quoted path on Windows (if not already quoted), unchanged path on other platforms 156 | 157 | """ 158 | if platform.system() == "Windows": 159 | # Check if already quoted to avoid double-quoting 160 | if path.startswith('"') and path.endswith('"'): 161 | return path 162 | return f'"{path}"' 163 | return path 164 | ``` -------------------------------------------------------------------------------- /src/solidlsp/language_servers/r_language_server.py: -------------------------------------------------------------------------------- ```python 1 | import logging 2 | import os 3 | import pathlib 4 | import subprocess 5 | import threading 6 | 7 | from overrides import override 8 | 9 | from solidlsp.ls import SolidLanguageServer 10 | from solidlsp.ls_config import LanguageServerConfig 11 | from solidlsp.ls_logger import LanguageServerLogger 12 | from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams 13 | from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo 14 | from solidlsp.settings import SolidLSPSettings 15 | 16 | 17 | class RLanguageServer(SolidLanguageServer): 18 | """R Language Server implementation using the languageserver R package.""" 19 | 20 | @override 21 | def _get_wait_time_for_cross_file_referencing(self) -> float: 22 | return 5.0 # R language server needs extra time for workspace indexing in CI environments 23 | 24 | @override 25 | def is_ignored_dirname(self, dirname: str) -> bool: 26 | # For R projects, ignore common directories 27 | return super().is_ignored_dirname(dirname) or dirname in [ 28 | "renv", # R environment management 29 | "packrat", # Legacy R package management 30 | ".Rproj.user", # RStudio project files 31 | "vignettes", # Package vignettes (often large) 32 | ] 33 | 34 | @staticmethod 35 | def _check_r_installation(): 36 | """Check if R and languageserver are available.""" 37 | try: 38 | # Check R installation 39 | result = subprocess.run(["R", "--version"], capture_output=True, text=True, check=False) 40 | if result.returncode != 0: 41 | raise RuntimeError("R is not installed or not in PATH") 42 | 43 | # Check languageserver package 44 | result = subprocess.run( 45 | ["R", "--vanilla", "--quiet", "--slave", "-e", "if (!require('languageserver', quietly=TRUE)) quit(status=1)"], 46 | capture_output=True, 47 | text=True, 48 | check=False, 49 | ) 50 | 51 | if result.returncode != 0: 52 | raise RuntimeError( 53 | "R languageserver package is not installed.\nInstall it with: R -e \"install.packages('languageserver')\"" 54 | ) 55 | 56 | except FileNotFoundError: 57 | raise RuntimeError("R is not installed. 
Please install R from https://www.r-project.org/") 58 | 59 | def __init__( 60 | self, config: LanguageServerConfig, logger: LanguageServerLogger, repository_root_path: str, solidlsp_settings: SolidLSPSettings 61 | ): 62 | # Check R installation 63 | self._check_r_installation() 64 | 65 | # R command to start language server 66 | # Use --vanilla for minimal startup and --quiet to suppress all output except LSP 67 | # Set specific options to improve parsing stability 68 | r_cmd = 'R --vanilla --quiet --slave -e "options(languageserver.debug_mode = FALSE); languageserver::run()"' 69 | 70 | super().__init__( 71 | config, 72 | logger, 73 | repository_root_path, 74 | ProcessLaunchInfo(cmd=r_cmd, cwd=repository_root_path), 75 | "r", 76 | solidlsp_settings, 77 | ) 78 | self.server_ready = threading.Event() 79 | 80 | @staticmethod 81 | def _get_initialize_params(repository_absolute_path: str) -> InitializeParams: 82 | """Initialize params for R Language Server.""" 83 | root_uri = pathlib.Path(repository_absolute_path).as_uri() 84 | initialize_params = { 85 | "locale": "en", 86 | "capabilities": { 87 | "textDocument": { 88 | "synchronization": {"didSave": True, "dynamicRegistration": True}, 89 | "completion": { 90 | "dynamicRegistration": True, 91 | "completionItem": { 92 | "snippetSupport": True, 93 | "commitCharactersSupport": True, 94 | "documentationFormat": ["markdown", "plaintext"], 95 | "deprecatedSupport": True, 96 | "preselectSupport": True, 97 | }, 98 | }, 99 | "hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]}, 100 | "definition": {"dynamicRegistration": True}, 101 | "references": {"dynamicRegistration": True}, 102 | "documentSymbol": { 103 | "dynamicRegistration": True, 104 | "hierarchicalDocumentSymbolSupport": True, 105 | "symbolKind": {"valueSet": list(range(1, 27))}, 106 | }, 107 | "formatting": {"dynamicRegistration": True}, 108 | "rangeFormatting": {"dynamicRegistration": True}, 109 | }, 110 | "workspace": { 111 | "workspaceFolders": True, 112 | "didChangeConfiguration": {"dynamicRegistration": True}, 113 | "symbol": { 114 | "dynamicRegistration": True, 115 | "symbolKind": {"valueSet": list(range(1, 27))}, 116 | }, 117 | }, 118 | }, 119 | "processId": os.getpid(), 120 | "rootPath": repository_absolute_path, 121 | "rootUri": root_uri, 122 | "workspaceFolders": [ 123 | { 124 | "uri": root_uri, 125 | "name": os.path.basename(repository_absolute_path), 126 | } 127 | ], 128 | } 129 | return initialize_params 130 | 131 | def _start_server(self): 132 | """Start R Language Server process.""" 133 | 134 | def window_log_message(msg): 135 | self.logger.log(f"R LSP: window/logMessage: {msg}", logging.INFO) 136 | 137 | def do_nothing(params): 138 | return 139 | 140 | def register_capability_handler(params): 141 | return 142 | 143 | # Register LSP message handlers 144 | self.server.on_request("client/registerCapability", register_capability_handler) 145 | self.server.on_notification("window/logMessage", window_log_message) 146 | self.server.on_notification("$/progress", do_nothing) 147 | self.server.on_notification("textDocument/publishDiagnostics", do_nothing) 148 | 149 | self.logger.log("Starting R Language Server process", logging.INFO) 150 | self.server.start() 151 | 152 | initialize_params = self._get_initialize_params(self.repository_root_path) 153 | self.logger.log( 154 | "Sending initialize request to R Language Server", 155 | logging.INFO, 156 | ) 157 | 158 | init_response = self.server.send.initialize(initialize_params) 159 | 160 | # Verify server 
capabilities 161 | capabilities = init_response.get("capabilities", {}) 162 | assert "textDocumentSync" in capabilities 163 | if "completionProvider" in capabilities: 164 | self.logger.log("R LSP completion provider available", logging.INFO) 165 | if "definitionProvider" in capabilities: 166 | self.logger.log("R LSP definition provider available", logging.INFO) 167 | 168 | self.server.notify.initialized({}) 169 | self.completions_available.set() 170 | 171 | # R Language Server is ready after initialization 172 | self.server_ready.set() 173 | ``` -------------------------------------------------------------------------------- /src/serena/tools/jetbrains_tools.py: -------------------------------------------------------------------------------- ```python 1 | import json 2 | 3 | from serena.tools import Tool, ToolMarkerOptional, ToolMarkerSymbolicRead 4 | from serena.tools.jetbrains_plugin_client import JetBrainsPluginClient 5 | 6 | 7 | class JetBrainsFindSymbolTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional): 8 | """ 9 | Performs a global (or local) search for symbols with/containing a given name/substring (optionally filtered by type). 10 | """ 11 | 12 | def apply( 13 | self, 14 | name_path: str, 15 | depth: int = 0, 16 | relative_path: str | None = None, 17 | include_body: bool = False, 18 | max_answer_chars: int = -1, 19 | ) -> str: 20 | """ 21 | Retrieves information on all symbols/code entities (classes, methods, etc.) based on the given `name_path`, 22 | which represents a pattern for the symbol's path within the symbol tree of a single file. 23 | The returned symbol location can be used for edits or further queries. 24 | Specify `depth > 0` to retrieve children (e.g., methods of a class). 25 | 26 | The matching behavior is determined by the structure of `name_path`, which can 27 | either be a simple name (e.g. "method") or a name path like "class/method" (relative name path) 28 | or "/class/method" (absolute name path). 29 | Note that the name path is not a path in the file system but rather a path in the symbol tree 30 | **within a single file**. Thus, file or directory names should never be included in the `name_path`. 31 | For restricting the search to a single file or directory, pass the `relative_path` parameter. 32 | The retrieved symbols' `name_path` attribute will always be composed of symbol names, never file 33 | or directory names. 34 | 35 | Key aspects of the name path matching behavior: 36 | - The name of the retrieved symbols will match the last segment of `name_path`, while preceding segments 37 | will restrict the search to symbols that have a desired sequence of ancestors. 38 | - If there is no `/` in `name_path`, there is no restriction on the ancestor symbols. 39 | For example, passing `method` will match against all symbols with name paths like `method`, 40 | `class/method`, `class/nested_class/method`, etc. 41 | - If `name_path` contains at least one `/`, the matching is restricted to symbols 42 | with the respective ancestors. For example, passing `class/method` will match against 43 | `class/method` as well as `nested_class/class/method` but not `other_class/method`. 44 | - If `name_path` starts with a `/`, it will be treated as an absolute name path pattern, i.e. 45 | all ancestors are provided and must match. 46 | For example, passing `/class` will match only against top-level symbols named `class` but 47 | will not match `nested_class/class`. Passing `/class/method` will match `class/method` but 48 | not `outer_class/class/method`. 
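        Illustrative example (symbol names are assumptions): `name_path="Tool/apply"` matches an `apply`
        method of a class named `Tool` at any nesting level, whereas `name_path="/Tool"` matches only a
        top-level symbol named `Tool`, and `name_path="apply"` matches any symbol named `apply`.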
49 | 50 | :param name_path: The name path pattern to search for, see above for details. 51 | :param depth: Depth to retrieve descendants (e.g., 1 for class methods/attributes). 52 | :param relative_path: Optional. Restrict search to this file or directory. 53 | If None, searches entire codebase. 54 | If a directory is passed, the search will be restricted to the files in that directory. 55 | If a file is passed, the search will be restricted to that file. 56 | If you have some knowledge about the codebase, you should use this parameter, as it will significantly 57 | speed up the search as well as reduce the number of results. 58 | :param include_body: If True, include the symbol's source code. Use judiciously. 59 | :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned. 60 | -1 means the default value from the config will be used. 61 | :return: JSON string: a list of symbols (with locations) matching the name. 62 | """ 63 | with JetBrainsPluginClient.from_project(self.project) as client: 64 | response_dict = client.find_symbol( 65 | name_path=name_path, 66 | relative_path=relative_path, 67 | depth=depth, 68 | include_body=include_body, 69 | ) 70 | result = json.dumps(response_dict) 71 | return self._limit_length(result, max_answer_chars) 72 | 73 | 74 | class JetBrainsFindReferencingSymbolsTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional): 75 | """ 76 | Finds symbols that reference the given symbol 77 | """ 78 | 79 | def apply( 80 | self, 81 | name_path: str, 82 | relative_path: str, 83 | max_answer_chars: int = -1, 84 | ) -> str: 85 | """ 86 | Finds symbols that reference the symbol at the given `name_path`. 87 | The result will contain metadata about the referencing symbols. 88 | 89 | :param name_path: name path of the symbol for which to find references; matching logic as described in find symbol tool. 90 | :param relative_path: the relative path to the file containing the symbol for which to find references. 91 | Note that here you can't pass a directory but must pass a file. 92 | :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned. -1 means the 93 | default value from the config will be used. 94 | :return: a list of JSON objects with the symbols referencing the requested symbol 95 | """ 96 | with JetBrainsPluginClient.from_project(self.project) as client: 97 | response_dict = client.find_references( 98 | name_path=name_path, 99 | relative_path=relative_path, 100 | ) 101 | result = json.dumps(response_dict) 102 | return self._limit_length(result, max_answer_chars) 103 | 104 | 105 | class JetBrainsGetSymbolsOverviewTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional): 106 | """ 107 | Retrieves an overview of the top-level symbols within a specified file 108 | """ 109 | 110 | def apply( 111 | self, 112 | relative_path: str, 113 | max_answer_chars: int = -1, 114 | ) -> str: 115 | """ 116 | Gets an overview of the top-level symbols in the given file. 117 | Calling this is often a good idea before more targeted reading, searching or editing operations on the code symbols. 118 | Before requesting a symbol overview, it is usually a good idea to narrow down the scope of the overview 119 | by first understanding the basic directory structure of the repository that you can get from memories 120 | or by using the `list_dir` and `find_file` tools (or similar). 
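        Illustrative example (file path is an assumption): requesting an overview of `lib/models.ex`
        would return only the symbols defined at the top level of that file, which can then be explored
        further with the find-symbol tool.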
121 | 122 | :param relative_path: the relative path to the file to get the overview of 123 | :param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned. 124 | -1 means the default value from the config will be used. 125 | :return: a JSON object containing the symbols 126 | """ 127 | with JetBrainsPluginClient.from_project(self.project) as client: 128 | response_dict = client.get_symbols_overview( 129 | relative_path=relative_path, 130 | ) 131 | result = json.dumps(response_dict) 132 | return self._limit_length(result, max_answer_chars) 133 | ``` -------------------------------------------------------------------------------- /test/solidlsp/elixir/test_elixir_ignored_dirs.py: -------------------------------------------------------------------------------- ```python 1 | from collections.abc import Generator 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | from solidlsp import SolidLanguageServer 7 | from solidlsp.ls_config import Language 8 | from test.conftest import create_ls 9 | 10 | from . import NEXTLS_UNAVAILABLE, NEXTLS_UNAVAILABLE_REASON 11 | 12 | # These marks will be applied to all tests in this module 13 | pytestmark = [pytest.mark.elixir, pytest.mark.skipif(NEXTLS_UNAVAILABLE, reason=f"Next LS not available: {NEXTLS_UNAVAILABLE_REASON}")] 14 | 15 | 16 | @pytest.fixture(scope="module") 17 | def ls_with_ignored_dirs() -> Generator[SolidLanguageServer, None, None]: 18 | """Fixture to set up an LS for the elixir test repo with the 'scripts' directory ignored.""" 19 | ignored_paths = ["scripts", "ignored_dir"] 20 | ls = create_ls(ignored_paths=ignored_paths, language=Language.ELIXIR) 21 | ls.start() 22 | try: 23 | yield ls 24 | finally: 25 | ls.stop() 26 | 27 | 28 | @pytest.mark.parametrize("ls_with_ignored_dirs", [Language.ELIXIR], indirect=True) 29 | def test_symbol_tree_ignores_dir(ls_with_ignored_dirs: SolidLanguageServer): 30 | """Tests that request_full_symbol_tree ignores the configured directory.""" 31 | root = ls_with_ignored_dirs.request_full_symbol_tree()[0] 32 | root_children = root["children"] 33 | children_names = {child["name"] for child in root_children} 34 | 35 | # Should have lib and test directories, but not scripts or ignored_dir 36 | expected_dirs = {"lib", "test"} 37 | assert expected_dirs.issubset(children_names), f"Expected {expected_dirs} to be in {children_names}" 38 | assert "scripts" not in children_names, f"scripts should not be in {children_names}" 39 | assert "ignored_dir" not in children_names, f"ignored_dir should not be in {children_names}" 40 | 41 | 42 | @pytest.mark.parametrize("ls_with_ignored_dirs", [Language.ELIXIR], indirect=True) 43 | def test_find_references_ignores_dir(ls_with_ignored_dirs: SolidLanguageServer): 44 | """Tests that find_references ignores the configured directory.""" 45 | # Location of User struct, which is referenced in scripts and ignored_dir 46 | definition_file = "lib/models.ex" 47 | 48 | # Find the User struct definition 49 | symbols = ls_with_ignored_dirs.request_document_symbols(definition_file) 50 | user_symbol = None 51 | for symbol_group in symbols: 52 | user_symbol = next((s for s in symbol_group if "User" in s.get("name", "")), None) 53 | if user_symbol: 54 | break 55 | 56 | if not user_symbol or "selectionRange" not in user_symbol: 57 | pytest.skip("User symbol not found for reference testing") 58 | 59 | sel_start = user_symbol["selectionRange"]["start"] 60 | references = ls_with_ignored_dirs.request_references(definition_file, sel_start["line"], 
sel_start["character"]) 61 | 62 | # Assert that scripts and ignored_dir do not appear in the references 63 | assert not any("scripts" in ref["relativePath"] for ref in references), "scripts should be ignored" 64 | assert not any("ignored_dir" in ref["relativePath"] for ref in references), "ignored_dir should be ignored" 65 | 66 | 67 | @pytest.mark.parametrize("repo_path", [Language.ELIXIR], indirect=True) 68 | def test_refs_and_symbols_with_glob_patterns(repo_path: Path) -> None: 69 | """Tests that refs and symbols with glob patterns are ignored.""" 70 | ignored_paths = ["*cripts", "ignored_*"] # codespell:ignore cripts 71 | ls = create_ls(ignored_paths=ignored_paths, repo_path=str(repo_path), language=Language.ELIXIR) 72 | ls.start() 73 | 74 | try: 75 | # Same as in the above tests 76 | root = ls.request_full_symbol_tree()[0] 77 | root_children = root["children"] 78 | children_names = {child["name"] for child in root_children} 79 | 80 | # Should have lib and test directories, but not scripts or ignored_dir 81 | expected_dirs = {"lib", "test"} 82 | assert expected_dirs.issubset(children_names), f"Expected {expected_dirs} to be in {children_names}" 83 | assert "scripts" not in children_names, f"scripts should not be in {children_names} (glob pattern)" 84 | assert "ignored_dir" not in children_names, f"ignored_dir should not be in {children_names} (glob pattern)" 85 | 86 | # Test that the refs and symbols with glob patterns are ignored 87 | definition_file = "lib/models.ex" 88 | 89 | # Find the User struct definition 90 | symbols = ls.request_document_symbols(definition_file) 91 | user_symbol = None 92 | for symbol_group in symbols: 93 | user_symbol = next((s for s in symbol_group if "User" in s.get("name", "")), None) 94 | if user_symbol: 95 | break 96 | 97 | if user_symbol and "selectionRange" in user_symbol: 98 | sel_start = user_symbol["selectionRange"]["start"] 99 | references = ls.request_references(definition_file, sel_start["line"], sel_start["character"]) 100 | 101 | # Assert that scripts and ignored_dir do not appear in references 102 | assert not any("scripts" in ref["relativePath"] for ref in references), "scripts should be ignored (glob)" 103 | assert not any("ignored_dir" in ref["relativePath"] for ref in references), "ignored_dir should be ignored (glob)" 104 | finally: 105 | ls.stop() 106 | 107 | 108 | @pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 109 | def test_default_ignored_directories(language_server: SolidLanguageServer): 110 | """Test that default Elixir directories are ignored.""" 111 | # Test that Elixir-specific directories are ignored by default 112 | assert language_server.is_ignored_dirname("_build"), "_build should be ignored" 113 | assert language_server.is_ignored_dirname("deps"), "deps should be ignored" 114 | assert language_server.is_ignored_dirname(".elixir_ls"), ".elixir_ls should be ignored" 115 | assert language_server.is_ignored_dirname("cover"), "cover should be ignored" 116 | assert language_server.is_ignored_dirname("node_modules"), "node_modules should be ignored" 117 | 118 | # Test that important directories are not ignored 119 | assert not language_server.is_ignored_dirname("lib"), "lib should not be ignored" 120 | assert not language_server.is_ignored_dirname("test"), "test should not be ignored" 121 | assert not language_server.is_ignored_dirname("config"), "config should not be ignored" 122 | assert not language_server.is_ignored_dirname("priv"), "priv should not be ignored" 123 | 124 | 125 | 
@pytest.mark.parametrize("language_server", [Language.ELIXIR], indirect=True) 126 | def test_symbol_tree_excludes_build_dirs(language_server: SolidLanguageServer): 127 | """Test that symbol tree excludes build and dependency directories.""" 128 | symbol_tree = language_server.request_full_symbol_tree() 129 | 130 | if symbol_tree: 131 | root = symbol_tree[0] 132 | children_names = {child["name"] for child in root.get("children", [])} 133 | 134 | # Build and dependency directories should not appear 135 | ignored_dirs = {"_build", "deps", ".elixir_ls", "cover", "node_modules"} 136 | found_ignored = ignored_dirs.intersection(children_names) 137 | assert len(found_ignored) == 0, f"Found ignored directories in symbol tree: {found_ignored}" 138 | 139 | # Important directories should appear 140 | important_dirs = {"lib", "test"} 141 | found_important = important_dirs.intersection(children_names) 142 | assert len(found_important) > 0, f"Expected to find important directories: {important_dirs}, got: {children_names}" 143 | ```
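The fixture-and-assertion pattern used in the Elixir tests above transfers to the other languages exercised by this test suite. The following is a minimal sketch under stated assumptions: the `go` marker is registered in `pyproject.toml` and Go is a supported language, so a corresponding `Language.GO` member is assumed; the `vendor` directory name and the layout of a Go test repository are purely illustrative.

```python
from collections.abc import Generator

import pytest

from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from test.conftest import create_ls

pytestmark = pytest.mark.go


@pytest.fixture(scope="module")
def go_ls_with_ignored_dirs() -> Generator[SolidLanguageServer, None, None]:
    """Start a Go language server with an extra ignored directory ('vendor' is illustrative)."""
    ls = create_ls(ignored_paths=["vendor"], language=Language.GO)
    ls.start()
    try:
        yield ls
    finally:
        ls.stop()


def test_symbol_tree_skips_ignored_dir(go_ls_with_ignored_dirs: SolidLanguageServer) -> None:
    # The ignored directory must not appear among the top-level children of the symbol tree.
    root = go_ls_with_ignored_dirs.request_full_symbol_tree()[0]
    children_names = {child["name"] for child in root.get("children", [])}
    assert "vendor" not in children_names
```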