This is page 6 of 17. Use http://codebase.md/oraios/serena?page={x} to view the full context.
# Directory Structure
```
├── .devcontainer
│ └── devcontainer.json
├── .dockerignore
├── .env.example
├── .github
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── config.yml
│ │ ├── feature_request.md
│ │ └── issue--bug--performance-problem--question-.md
│ └── workflows
│ ├── codespell.yml
│ ├── docker.yml
│ ├── docs.yaml
│ ├── junie.yml
│ ├── publish.yml
│ └── pytest.yml
├── .gitignore
├── .serena
│ ├── .gitignore
│ ├── memories
│ │ ├── adding_new_language_support_guide.md
│ │ ├── serena_core_concepts_and_architecture.md
│ │ ├── serena_repository_structure.md
│ │ └── suggested_commands.md
│ └── project.yml
├── .vscode
│ └── settings.json
├── CHANGELOG.md
├── CLAUDE.md
├── compose.yaml
├── CONTRIBUTING.md
├── docker_build_and_run.sh
├── DOCKER.md
├── Dockerfile
├── docs
│ ├── _config.yml
│ ├── _static
│ │ └── images
│ │ └── jetbrains-marketplace-button.png
│ ├── .gitignore
│ ├── 01-about
│ │ ├── 000_intro.md
│ │ ├── 010_llm-integration.md
│ │ ├── 020_programming-languages.md
│ │ ├── 030_serena-in-action.md
│ │ ├── 035_tools.md
│ │ ├── 040_comparison-to-other-agents.md
│ │ └── 050_acknowledgements.md
│ ├── 02-usage
│ │ ├── 000_intro.md
│ │ ├── 010_prerequisites.md
│ │ ├── 020_running.md
│ │ ├── 025_jetbrains_plugin.md
│ │ ├── 030_clients.md
│ │ ├── 040_workflow.md
│ │ ├── 050_configuration.md
│ │ ├── 060_dashboard.md
│ │ ├── 070_security.md
│ │ └── 999_additional-usage.md
│ ├── 03-special-guides
│ │ ├── 000_intro.md
│ │ ├── custom_agent.md
│ │ ├── groovy_setup_guide_for_serena.md
│ │ ├── scala_setup_guide_for_serena.md
│ │ └── serena_on_chatgpt.md
│ ├── autogen_rst.py
│ ├── create_toc.py
│ └── index.md
├── flake.lock
├── flake.nix
├── lessons_learned.md
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── repo_dir_sync.py
├── resources
│ ├── jetbrains-marketplace-button.cdr
│ ├── serena-icons.cdr
│ ├── serena-logo-dark-mode.svg
│ ├── serena-logo.cdr
│ ├── serena-logo.svg
│ └── vscode_sponsor_logo.png
├── roadmap.md
├── scripts
│ ├── agno_agent.py
│ ├── demo_run_tools.py
│ ├── gen_prompt_factory.py
│ ├── mcp_server.py
│ ├── print_mode_context_options.py
│ ├── print_tool_overview.py
│ └── profile_tool_call.py
├── src
│ ├── interprompt
│ │ ├── __init__.py
│ │ ├── .syncCommitId.remote
│ │ ├── .syncCommitId.this
│ │ ├── jinja_template.py
│ │ ├── multilang_prompt.py
│ │ ├── prompt_factory.py
│ │ └── util
│ │ ├── __init__.py
│ │ └── class_decorators.py
│ ├── README.md
│ ├── serena
│ │ ├── __init__.py
│ │ ├── agent.py
│ │ ├── agno.py
│ │ ├── analytics.py
│ │ ├── cli.py
│ │ ├── code_editor.py
│ │ ├── config
│ │ │ ├── __init__.py
│ │ │ ├── context_mode.py
│ │ │ └── serena_config.py
│ │ ├── constants.py
│ │ ├── dashboard.py
│ │ ├── generated
│ │ │ └── generated_prompt_factory.py
│ │ ├── gui_log_viewer.py
│ │ ├── ls_manager.py
│ │ ├── mcp.py
│ │ ├── project.py
│ │ ├── prompt_factory.py
│ │ ├── resources
│ │ │ ├── config
│ │ │ │ ├── contexts
│ │ │ │ │ ├── agent.yml
│ │ │ │ │ ├── chatgpt.yml
│ │ │ │ │ ├── claude-code.yml
│ │ │ │ │ ├── codex.yml
│ │ │ │ │ ├── context.template.yml
│ │ │ │ │ ├── desktop-app.yml
│ │ │ │ │ ├── ide.yml
│ │ │ │ │ └── oaicompat-agent.yml
│ │ │ │ ├── internal_modes
│ │ │ │ │ └── jetbrains.yml
│ │ │ │ ├── modes
│ │ │ │ │ ├── editing.yml
│ │ │ │ │ ├── interactive.yml
│ │ │ │ │ ├── mode.template.yml
│ │ │ │ │ ├── no-memories.yml
│ │ │ │ │ ├── no-onboarding.yml
│ │ │ │ │ ├── onboarding.yml
│ │ │ │ │ ├── one-shot.yml
│ │ │ │ │ └── planning.yml
│ │ │ │ └── prompt_templates
│ │ │ │ ├── simple_tool_outputs.yml
│ │ │ │ └── system_prompt.yml
│ │ │ ├── dashboard
│ │ │ │ ├── dashboard.css
│ │ │ │ ├── dashboard.js
│ │ │ │ ├── index.html
│ │ │ │ ├── jquery.min.js
│ │ │ │ ├── news
│ │ │ │ │ └── 20260111.html
│ │ │ │ ├── serena-icon-16.png
│ │ │ │ ├── serena-icon-32.png
│ │ │ │ ├── serena-icon-48.png
│ │ │ │ ├── serena-logo-dark-mode.svg
│ │ │ │ ├── serena-logo.svg
│ │ │ │ ├── serena-logs-dark-mode.png
│ │ │ │ └── serena-logs.png
│ │ │ ├── project.template.yml
│ │ │ └── serena_config.template.yml
│ │ ├── symbol.py
│ │ ├── task_executor.py
│ │ ├── text_utils.py
│ │ ├── tools
│ │ │ ├── __init__.py
│ │ │ ├── cmd_tools.py
│ │ │ ├── config_tools.py
│ │ │ ├── file_tools.py
│ │ │ ├── jetbrains_plugin_client.py
│ │ │ ├── jetbrains_tools.py
│ │ │ ├── jetbrains_types.py
│ │ │ ├── memory_tools.py
│ │ │ ├── symbol_tools.py
│ │ │ ├── tools_base.py
│ │ │ └── workflow_tools.py
│ │ └── util
│ │ ├── class_decorators.py
│ │ ├── cli_util.py
│ │ ├── exception.py
│ │ ├── file_system.py
│ │ ├── general.py
│ │ ├── git.py
│ │ ├── gui.py
│ │ ├── inspection.py
│ │ ├── logging.py
│ │ ├── shell.py
│ │ ├── thread.py
│ │ └── version.py
│ └── solidlsp
│ ├── __init__.py
│ ├── .gitignore
│ ├── language_servers
│ │ ├── al_language_server.py
│ │ ├── bash_language_server.py
│ │ ├── clangd_language_server.py
│ │ ├── clojure_lsp.py
│ │ ├── common.py
│ │ ├── csharp_language_server.py
│ │ ├── dart_language_server.py
│ │ ├── eclipse_jdtls.py
│ │ ├── elixir_tools
│ │ │ ├── __init__.py
│ │ │ ├── elixir_tools.py
│ │ │ └── README.md
│ │ ├── elm_language_server.py
│ │ ├── erlang_language_server.py
│ │ ├── fortran_language_server.py
│ │ ├── fsharp_language_server.py
│ │ ├── gopls.py
│ │ ├── groovy_language_server.py
│ │ ├── haskell_language_server.py
│ │ ├── intelephense.py
│ │ ├── jedi_server.py
│ │ ├── julia_server.py
│ │ ├── kotlin_language_server.py
│ │ ├── lua_ls.py
│ │ ├── marksman.py
│ │ ├── matlab_language_server.py
│ │ ├── nixd_ls.py
│ │ ├── omnisharp
│ │ │ ├── initialize_params.json
│ │ │ ├── runtime_dependencies.json
│ │ │ └── workspace_did_change_configuration.json
│ │ ├── omnisharp.py
│ │ ├── pascal_server.py
│ │ ├── perl_language_server.py
│ │ ├── powershell_language_server.py
│ │ ├── pyright_server.py
│ │ ├── r_language_server.py
│ │ ├── regal_server.py
│ │ ├── ruby_lsp.py
│ │ ├── rust_analyzer.py
│ │ ├── scala_language_server.py
│ │ ├── solargraph.py
│ │ ├── sourcekit_lsp.py
│ │ ├── taplo_server.py
│ │ ├── terraform_ls.py
│ │ ├── typescript_language_server.py
│ │ ├── vts_language_server.py
│ │ ├── vue_language_server.py
│ │ ├── yaml_language_server.py
│ │ └── zls.py
│ ├── ls_config.py
│ ├── ls_exceptions.py
│ ├── ls_handler.py
│ ├── ls_request.py
│ ├── ls_types.py
│ ├── ls_utils.py
│ ├── ls.py
│ ├── lsp_protocol_handler
│ │ ├── lsp_constants.py
│ │ ├── lsp_requests.py
│ │ ├── lsp_types.py
│ │ └── server.py
│ ├── settings.py
│ └── util
│ ├── cache.py
│ ├── subprocess_util.py
│ └── zip.py
├── sync.py
├── test
│ ├── __init__.py
│ ├── conftest.py
│ ├── resources
│ │ └── repos
│ │ ├── al
│ │ │ └── test_repo
│ │ │ ├── app.json
│ │ │ └── src
│ │ │ ├── Codeunits
│ │ │ │ ├── CustomerMgt.Codeunit.al
│ │ │ │ └── PaymentProcessorImpl.Codeunit.al
│ │ │ ├── Enums
│ │ │ │ └── CustomerType.Enum.al
│ │ │ ├── Interfaces
│ │ │ │ └── IPaymentProcessor.Interface.al
│ │ │ ├── Pages
│ │ │ │ ├── CustomerCard.Page.al
│ │ │ │ └── CustomerList.Page.al
│ │ │ ├── TableExtensions
│ │ │ │ └── Item.TableExt.al
│ │ │ └── Tables
│ │ │ └── Customer.Table.al
│ │ ├── bash
│ │ │ └── test_repo
│ │ │ ├── config.sh
│ │ │ ├── main.sh
│ │ │ └── utils.sh
│ │ ├── clojure
│ │ │ └── test_repo
│ │ │ ├── deps.edn
│ │ │ └── src
│ │ │ └── test_app
│ │ │ ├── core.clj
│ │ │ └── utils.clj
│ │ ├── csharp
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── Models
│ │ │ │ └── Person.cs
│ │ │ ├── Program.cs
│ │ │ ├── serena.sln
│ │ │ └── TestProject.csproj
│ │ ├── dart
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── lib
│ │ │ │ ├── helper.dart
│ │ │ │ ├── main.dart
│ │ │ │ └── models.dart
│ │ │ └── pubspec.yaml
│ │ ├── elixir
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── lib
│ │ │ │ ├── examples.ex
│ │ │ │ ├── ignored_dir
│ │ │ │ │ └── ignored_module.ex
│ │ │ │ ├── models.ex
│ │ │ │ ├── services.ex
│ │ │ │ ├── test_repo.ex
│ │ │ │ └── utils.ex
│ │ │ ├── mix.exs
│ │ │ ├── mix.lock
│ │ │ ├── scripts
│ │ │ │ └── build_script.ex
│ │ │ └── test
│ │ │ ├── models_test.exs
│ │ │ └── test_repo_test.exs
│ │ ├── elm
│ │ │ └── test_repo
│ │ │ ├── elm.json
│ │ │ ├── Main.elm
│ │ │ └── Utils.elm
│ │ ├── erlang
│ │ │ └── test_repo
│ │ │ ├── hello.erl
│ │ │ ├── ignored_dir
│ │ │ │ └── ignored_module.erl
│ │ │ ├── include
│ │ │ │ ├── records.hrl
│ │ │ │ └── types.hrl
│ │ │ ├── math_utils.erl
│ │ │ ├── rebar.config
│ │ │ ├── src
│ │ │ │ ├── app.erl
│ │ │ │ ├── models.erl
│ │ │ │ ├── services.erl
│ │ │ │ └── utils.erl
│ │ │ └── test
│ │ │ ├── models_tests.erl
│ │ │ └── utils_tests.erl
│ │ ├── fortran
│ │ │ └── test_repo
│ │ │ ├── main.f90
│ │ │ └── modules
│ │ │ ├── geometry.f90
│ │ │ └── math_utils.f90
│ │ ├── fsharp
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── Calculator.fs
│ │ │ ├── Models
│ │ │ │ └── Person.fs
│ │ │ ├── Program.fs
│ │ │ ├── README.md
│ │ │ └── TestProject.fsproj
│ │ ├── go
│ │ │ └── test_repo
│ │ │ └── main.go
│ │ ├── groovy
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── build.gradle
│ │ │ └── src
│ │ │ └── main
│ │ │ └── groovy
│ │ │ └── com
│ │ │ └── example
│ │ │ ├── Main.groovy
│ │ │ ├── Model.groovy
│ │ │ ├── ModelUser.groovy
│ │ │ └── Utils.groovy
│ │ ├── haskell
│ │ │ └── test_repo
│ │ │ ├── app
│ │ │ │ └── Main.hs
│ │ │ ├── haskell-test-repo.cabal
│ │ │ ├── package.yaml
│ │ │ ├── src
│ │ │ │ ├── Calculator.hs
│ │ │ │ └── Helper.hs
│ │ │ └── stack.yaml
│ │ ├── java
│ │ │ └── test_repo
│ │ │ ├── pom.xml
│ │ │ └── src
│ │ │ └── main
│ │ │ └── java
│ │ │ └── test_repo
│ │ │ ├── Main.java
│ │ │ ├── Model.java
│ │ │ ├── ModelUser.java
│ │ │ └── Utils.java
│ │ ├── julia
│ │ │ └── test_repo
│ │ │ ├── lib
│ │ │ │ └── helper.jl
│ │ │ └── main.jl
│ │ ├── kotlin
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── build.gradle.kts
│ │ │ └── src
│ │ │ └── main
│ │ │ └── kotlin
│ │ │ └── test_repo
│ │ │ ├── Main.kt
│ │ │ ├── Model.kt
│ │ │ ├── ModelUser.kt
│ │ │ └── Utils.kt
│ │ ├── lua
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── main.lua
│ │ │ ├── src
│ │ │ │ ├── calculator.lua
│ │ │ │ └── utils.lua
│ │ │ └── tests
│ │ │ └── test_calculator.lua
│ │ ├── markdown
│ │ │ └── test_repo
│ │ │ ├── api.md
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── guide.md
│ │ │ └── README.md
│ │ ├── matlab
│ │ │ └── test_repo
│ │ │ ├── Calculator.m
│ │ │ └── main.m
│ │ ├── nix
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── default.nix
│ │ │ ├── flake.nix
│ │ │ ├── lib
│ │ │ │ └── utils.nix
│ │ │ ├── modules
│ │ │ │ └── example.nix
│ │ │ └── scripts
│ │ │ └── hello.sh
│ │ ├── pascal
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── lib
│ │ │ │ └── helper.pas
│ │ │ └── main.pas
│ │ ├── perl
│ │ │ └── test_repo
│ │ │ ├── helper.pl
│ │ │ └── main.pl
│ │ ├── php
│ │ │ └── test_repo
│ │ │ ├── helper.php
│ │ │ ├── index.php
│ │ │ └── simple_var.php
│ │ ├── powershell
│ │ │ └── test_repo
│ │ │ ├── main.ps1
│ │ │ ├── PowerShellEditorServices.json
│ │ │ └── utils.ps1
│ │ ├── python
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── custom_test
│ │ │ │ ├── __init__.py
│ │ │ │ └── advanced_features.py
│ │ │ ├── examples
│ │ │ │ ├── __init__.py
│ │ │ │ └── user_management.py
│ │ │ ├── ignore_this_dir_with_postfix
│ │ │ │ └── ignored_module.py
│ │ │ ├── scripts
│ │ │ │ ├── __init__.py
│ │ │ │ └── run_app.py
│ │ │ └── test_repo
│ │ │ ├── __init__.py
│ │ │ ├── complex_types.py
│ │ │ ├── models.py
│ │ │ ├── name_collisions.py
│ │ │ ├── nested_base.py
│ │ │ ├── nested.py
│ │ │ ├── overloaded.py
│ │ │ ├── services.py
│ │ │ ├── utils.py
│ │ │ └── variables.py
│ │ ├── r
│ │ │ └── test_repo
│ │ │ ├── .Rbuildignore
│ │ │ ├── DESCRIPTION
│ │ │ ├── examples
│ │ │ │ └── analysis.R
│ │ │ ├── NAMESPACE
│ │ │ └── R
│ │ │ ├── models.R
│ │ │ └── utils.R
│ │ ├── rego
│ │ │ └── test_repo
│ │ │ ├── policies
│ │ │ │ ├── authz.rego
│ │ │ │ └── validation.rego
│ │ │ └── utils
│ │ │ └── helpers.rego
│ │ ├── ruby
│ │ │ └── test_repo
│ │ │ ├── .solargraph.yml
│ │ │ ├── examples
│ │ │ │ └── user_management.rb
│ │ │ ├── lib.rb
│ │ │ ├── main.rb
│ │ │ ├── models.rb
│ │ │ ├── nested.rb
│ │ │ ├── services.rb
│ │ │ └── variables.rb
│ │ ├── rust
│ │ │ ├── test_repo
│ │ │ │ ├── Cargo.lock
│ │ │ │ ├── Cargo.toml
│ │ │ │ └── src
│ │ │ │ ├── lib.rs
│ │ │ │ └── main.rs
│ │ │ └── test_repo_2024
│ │ │ ├── Cargo.lock
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── lib.rs
│ │ │ └── main.rs
│ │ ├── scala
│ │ │ ├── build.sbt
│ │ │ ├── project
│ │ │ │ ├── build.properties
│ │ │ │ ├── metals.sbt
│ │ │ │ └── plugins.sbt
│ │ │ └── src
│ │ │ └── main
│ │ │ └── scala
│ │ │ └── com
│ │ │ └── example
│ │ │ ├── Main.scala
│ │ │ └── Utils.scala
│ │ ├── swift
│ │ │ └── test_repo
│ │ │ ├── Package.swift
│ │ │ └── src
│ │ │ ├── main.swift
│ │ │ └── utils.swift
│ │ ├── terraform
│ │ │ └── test_repo
│ │ │ ├── data.tf
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ └── variables.tf
│ │ ├── toml
│ │ │ └── test_repo
│ │ │ ├── Cargo.toml
│ │ │ ├── config.toml
│ │ │ └── pyproject.toml
│ │ ├── typescript
│ │ │ └── test_repo
│ │ │ ├── .serena
│ │ │ │ └── project.yml
│ │ │ ├── index.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── use_helper.ts
│ │ │ └── ws_manager.js
│ │ ├── vue
│ │ │ └── test_repo
│ │ │ ├── .gitignore
│ │ │ ├── index.html
│ │ │ ├── package.json
│ │ │ ├── src
│ │ │ │ ├── App.vue
│ │ │ │ ├── components
│ │ │ │ │ ├── CalculatorButton.vue
│ │ │ │ │ ├── CalculatorDisplay.vue
│ │ │ │ │ └── CalculatorInput.vue
│ │ │ │ ├── composables
│ │ │ │ │ ├── useFormatter.ts
│ │ │ │ │ └── useTheme.ts
│ │ │ │ ├── main.ts
│ │ │ │ ├── stores
│ │ │ │ │ └── calculator.ts
│ │ │ │ └── types
│ │ │ │ └── index.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── tsconfig.node.json
│ │ │ └── vite.config.ts
│ │ ├── yaml
│ │ │ └── test_repo
│ │ │ ├── config.yaml
│ │ │ ├── data.yaml
│ │ │ └── services.yml
│ │ └── zig
│ │ └── test_repo
│ │ ├── .gitignore
│ │ ├── build.zig
│ │ ├── src
│ │ │ ├── calculator.zig
│ │ │ ├── main.zig
│ │ │ └── math_utils.zig
│ │ └── zls.json
│ ├── serena
│ │ ├── __init__.py
│ │ ├── __snapshots__
│ │ │ └── test_symbol_editing.ambr
│ │ ├── config
│ │ │ ├── __init__.py
│ │ │ └── test_serena_config.py
│ │ ├── test_cli_project_commands.py
│ │ ├── test_edit_marker.py
│ │ ├── test_mcp.py
│ │ ├── test_serena_agent.py
│ │ ├── test_symbol_editing.py
│ │ ├── test_symbol.py
│ │ ├── test_task_executor.py
│ │ ├── test_text_utils.py
│ │ ├── test_tool_parameter_types.py
│ │ └── util
│ │ ├── test_exception.py
│ │ └── test_file_system.py
│ └── solidlsp
│ ├── al
│ │ └── test_al_basic.py
│ ├── bash
│ │ ├── __init__.py
│ │ └── test_bash_basic.py
│ ├── clojure
│ │ ├── __init__.py
│ │ └── test_clojure_basic.py
│ ├── csharp
│ │ └── test_csharp_basic.py
│ ├── dart
│ │ ├── __init__.py
│ │ └── test_dart_basic.py
│ ├── elixir
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_elixir_basic.py
│ │ ├── test_elixir_ignored_dirs.py
│ │ ├── test_elixir_integration.py
│ │ └── test_elixir_symbol_retrieval.py
│ ├── elm
│ │ └── test_elm_basic.py
│ ├── erlang
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_erlang_basic.py
│ │ ├── test_erlang_ignored_dirs.py
│ │ └── test_erlang_symbol_retrieval.py
│ ├── fortran
│ │ ├── __init__.py
│ │ └── test_fortran_basic.py
│ ├── fsharp
│ │ └── test_fsharp_basic.py
│ ├── go
│ │ └── test_go_basic.py
│ ├── groovy
│ │ └── test_groovy_basic.py
│ ├── haskell
│ │ ├── __init__.py
│ │ └── test_haskell_basic.py
│ ├── java
│ │ └── test_java_basic.py
│ ├── julia
│ │ └── test_julia_basic.py
│ ├── kotlin
│ │ └── test_kotlin_basic.py
│ ├── lua
│ │ └── test_lua_basic.py
│ ├── markdown
│ │ ├── __init__.py
│ │ └── test_markdown_basic.py
│ ├── matlab
│ │ ├── __init__.py
│ │ └── test_matlab_basic.py
│ ├── nix
│ │ └── test_nix_basic.py
│ ├── pascal
│ │ ├── __init__.py
│ │ └── test_pascal_basic.py
│ ├── perl
│ │ └── test_perl_basic.py
│ ├── php
│ │ └── test_php_basic.py
│ ├── powershell
│ │ ├── __init__.py
│ │ └── test_powershell_basic.py
│ ├── python
│ │ ├── test_python_basic.py
│ │ ├── test_retrieval_with_ignored_dirs.py
│ │ └── test_symbol_retrieval.py
│ ├── r
│ │ ├── __init__.py
│ │ └── test_r_basic.py
│ ├── rego
│ │ └── test_rego_basic.py
│ ├── ruby
│ │ ├── test_ruby_basic.py
│ │ └── test_ruby_symbol_retrieval.py
│ ├── rust
│ │ ├── test_rust_2024_edition.py
│ │ ├── test_rust_analyzer_detection.py
│ │ └── test_rust_basic.py
│ ├── scala
│ │ └── test_scala_language_server.py
│ ├── swift
│ │ └── test_swift_basic.py
│ ├── terraform
│ │ └── test_terraform_basic.py
│ ├── test_lsp_protocol_handler_server.py
│ ├── toml
│ │ ├── __init__.py
│ │ ├── test_toml_basic.py
│ │ ├── test_toml_edge_cases.py
│ │ ├── test_toml_ignored_dirs.py
│ │ └── test_toml_symbol_retrieval.py
│ ├── typescript
│ │ └── test_typescript_basic.py
│ ├── util
│ │ └── test_zip.py
│ ├── vue
│ │ ├── __init__.py
│ │ ├── test_vue_basic.py
│ │ ├── test_vue_error_cases.py
│ │ ├── test_vue_rename.py
│ │ └── test_vue_symbol_retrieval.py
│ ├── yaml_ls
│ │ ├── __init__.py
│ │ └── test_yaml_basic.py
│ └── zig
│ └── test_zig_basic.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/src/solidlsp/language_servers/groovy_language_server.py:
--------------------------------------------------------------------------------
```python
"""
Provides Groovy specific instantiation of the LanguageServer class. Contains various configurations and settings specific to Groovy.
"""
import dataclasses
import logging
import os
import pathlib
import shlex
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import Language, LanguageServerConfig
from solidlsp.ls_utils import FileUtils, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
@dataclasses.dataclass
class GroovyRuntimeDependencyPaths:
"""
Stores the paths to the runtime dependencies of Groovy Language Server
"""
java_path: str
java_home_path: str
ls_jar_path: str
groovy_home_path: str | None = None
class GroovyLanguageServer(SolidLanguageServer):
"""
Provides Groovy specific instantiation of the LanguageServer class.
Contains various configurations and settings specific to Groovy.
"""
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
"""
Creates a Groovy Language Server instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.
"""
runtime_dependency_paths = self._setup_runtime_dependencies(solidlsp_settings)
self.runtime_dependency_paths = runtime_dependency_paths
# Get jar options from configuration
ls_jar_options = []
if solidlsp_settings.ls_specific_settings:
groovy_settings = solidlsp_settings.get_ls_specific_settings(Language.GROOVY)
jar_options_str = groovy_settings.get("ls_jar_options", "")
if jar_options_str:
ls_jar_options = shlex.split(jar_options_str)
log.info(f"Using Groovy LS JAR options from configuration: {jar_options_str}")
# Create command to execute the Groovy Language Server
cmd = [self.runtime_dependency_paths.java_path, "-jar", self.runtime_dependency_paths.ls_jar_path]
cmd.extend(ls_jar_options)
# Set environment variables including JAVA_HOME
proc_env = {"JAVA_HOME": self.runtime_dependency_paths.java_home_path}
super().__init__(
config,
repository_root_path,
ProcessLaunchInfo(cmd=cmd, env=proc_env, cwd=repository_root_path),
"groovy",
solidlsp_settings,
)
log.info(f"Starting Groovy Language Server with jar options: {ls_jar_options}")
@classmethod
def _setup_runtime_dependencies(cls, solidlsp_settings: SolidLSPSettings) -> GroovyRuntimeDependencyPaths:
"""
Setup runtime dependencies for Groovy Language Server and return paths.
"""
platform_id = PlatformUtils.get_platform_id()
# Verify platform support
assert (
platform_id.value.startswith("win-") or platform_id.value.startswith("linux-") or platform_id.value.startswith("osx-")
), "Only Windows, Linux and macOS platforms are supported for Groovy in multilspy at the moment"
# Check if user specified custom Java home path
java_home_path = None
java_path = None
if solidlsp_settings and solidlsp_settings.ls_specific_settings:
groovy_settings = solidlsp_settings.get_ls_specific_settings(Language.GROOVY)
custom_java_home = groovy_settings.get("ls_java_home_path")
if custom_java_home:
log.info(f"Using custom Java home path from configuration: {custom_java_home}")
java_home_path = custom_java_home
# Determine java executable path based on platform
if platform_id.value.startswith("win-"):
java_path = os.path.join(java_home_path, "bin", "java.exe")
else:
java_path = os.path.join(java_home_path, "bin", "java")
# If no custom Java home path, download and use bundled Java
if java_home_path is None:
# Runtime dependency information
runtime_dependencies = {
"java": {
"win-x64": {
"url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-win32-x64-1.42.0-561.vsix",
"archiveType": "zip",
"java_home_path": "extension/jre/21.0.7-win32-x86_64",
"java_path": "extension/jre/21.0.7-win32-x86_64/bin/java.exe",
},
"linux-x64": {
"url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-linux-x64-1.42.0-561.vsix",
"archiveType": "zip",
"java_home_path": "extension/jre/21.0.7-linux-x86_64",
"java_path": "extension/jre/21.0.7-linux-x86_64/bin/java",
},
"linux-arm64": {
"url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-linux-arm64-1.42.0-561.vsix",
"archiveType": "zip",
"java_home_path": "extension/jre/21.0.7-linux-aarch64",
"java_path": "extension/jre/21.0.7-linux-aarch64/bin/java",
},
"osx-x64": {
"url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-darwin-x64-1.42.0-561.vsix",
"archiveType": "zip",
"java_home_path": "extension/jre/21.0.7-macosx-x86_64",
"java_path": "extension/jre/21.0.7-macosx-x86_64/bin/java",
},
"osx-arm64": {
"url": "https://github.com/redhat-developer/vscode-java/releases/download/v1.42.0/java-darwin-arm64-1.42.0-561.vsix",
"archiveType": "zip",
"java_home_path": "extension/jre/21.0.7-macosx-aarch64",
"java_path": "extension/jre/21.0.7-macosx-aarch64/bin/java",
},
},
}
java_dependency = runtime_dependencies["java"][platform_id.value]
static_dir = os.path.join(cls.ls_resources_dir(solidlsp_settings), "groovy_language_server")
os.makedirs(static_dir, exist_ok=True)
java_dir = os.path.join(static_dir, "java")
os.makedirs(java_dir, exist_ok=True)
java_home_path = os.path.join(java_dir, java_dependency["java_home_path"])
java_path = os.path.join(java_dir, java_dependency["java_path"])
if not os.path.exists(java_path):
log.info(f"Downloading Java for {platform_id.value}...")
FileUtils.download_and_extract_archive(java_dependency["url"], java_dir, java_dependency["archiveType"])
if not platform_id.value.startswith("win-"):
os.chmod(java_path, 0o755)
assert java_path and os.path.exists(java_path), f"Java executable not found at {java_path}"
ls_jar_path = cls._find_groovy_ls_jar(solidlsp_settings)
return GroovyRuntimeDependencyPaths(java_path=java_path, java_home_path=java_home_path, ls_jar_path=ls_jar_path)
@classmethod
def _find_groovy_ls_jar(cls, solidlsp_settings: SolidLSPSettings) -> str:
"""
Find Groovy Language Server JAR file
"""
if solidlsp_settings and solidlsp_settings.ls_specific_settings:
groovy_settings = solidlsp_settings.get_ls_specific_settings(Language.GROOVY)
config_jar_path = groovy_settings.get("ls_jar_path")
if config_jar_path and os.path.exists(config_jar_path):
log.info(f"Using Groovy LS JAR from configuration: {config_jar_path}")
return config_jar_path
# if JAR not found
raise RuntimeError(
"Groovy Language Server JAR not found. To use Groovy language support:\n"
"Set 'ls_jar_path' in groovy settings in serena_config.yml:\n"
" ls_specific_settings:\n"
" groovy:\n"
" ls_jar_path: '/path/to/groovy-language-server.jar'\n"
" Ensure the JAR file is available at the configured path\n"
)
@staticmethod
def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
"""
Returns the initialize params for the Groovy Language Server.
"""
if not os.path.isabs(repository_absolute_path):
repository_absolute_path = os.path.abspath(repository_absolute_path)
root_uri = pathlib.Path(repository_absolute_path).as_uri()
initialize_params = {
"clientInfo": {"name": "Serena Groovy Client", "version": "1.0.0"},
"rootPath": repository_absolute_path,
"rootUri": root_uri,
"capabilities": {
"textDocument": {
"synchronization": {"dynamicRegistration": True, "didSave": True},
"completion": {"dynamicRegistration": True},
"hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
"definition": {"dynamicRegistration": True},
"references": {"dynamicRegistration": True},
"documentSymbol": {"dynamicRegistration": True},
"workspaceSymbol": {"dynamicRegistration": True},
"signatureHelp": {"dynamicRegistration": True},
"rename": {"dynamicRegistration": True},
},
"workspace": {
"workspaceFolders": True,
},
},
"initializationOptions": {
"settings": {
"groovy": {
"classpath": [],
"diagnostics": {"enabled": True},
"completion": {"enabled": True},
}
},
},
"processId": os.getpid(),
"workspaceFolders": [
{
"uri": root_uri,
"name": os.path.basename(repository_absolute_path),
}
],
}
return initialize_params # type: ignore
def _start_server(self) -> None:
"""
Starts the Groovy Language Server
"""
def execute_client_command_handler(params: dict) -> list:
return []
def do_nothing(params: dict) -> None:
return
def window_log_message(msg: dict) -> None:
log.info(f"LSP: window/logMessage: {msg}")
self.server.on_request("client/registerCapability", do_nothing)
self.server.on_notification("language/status", do_nothing)
self.server.on_notification("window/logMessage", window_log_message)
self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
self.server.on_notification("$/progress", do_nothing)
self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
self.server.on_notification("language/actionableNotification", do_nothing)
log.info("Starting Groovy server process")
self.server.start()
initialize_params = self._get_initialize_params(self.repository_root_path)
log.info("Sending initialize request from LSP client to LSP server and awaiting response")
init_response = self.server.send.initialize(initialize_params)
capabilities = init_response["capabilities"]
assert "textDocumentSync" in capabilities, "Server must support textDocumentSync"
assert "hoverProvider" in capabilities, "Server must support hover"
assert "completionProvider" in capabilities, "Server must support code completion"
assert "signatureHelpProvider" in capabilities, "Server must support signature help"
assert "definitionProvider" in capabilities, "Server must support go to definition"
assert "referencesProvider" in capabilities, "Server must support find references"
assert "documentSymbolProvider" in capabilities, "Server must support document symbols"
assert "workspaceSymbolProvider" in capabilities, "Server must support workspace symbols"
self.server.notify.initialized({})
self.completions_available.set()
```
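
For reference, the JAR discovery above reduces to a dictionary lookup on the parsed `ls_specific_settings` section of `serena_config.yml`. Below is a minimal, self-contained sketch of that resolution logic using a plain dict in place of `SolidLSPSettings`; the `settings` variable and the `resolve_groovy_jar` helper are illustrative assumptions, not part of the repository.

```python
import os

# Hypothetical stand-in for the parsed "ls_specific_settings" section of serena_config.yml.
settings = {
    "groovy": {
        "ls_jar_path": "/path/to/groovy-language-server.jar",
        "ls_jar_options": "-Xmx512m",
    }
}


def resolve_groovy_jar(ls_specific_settings: dict) -> str:
    """Sketch mirroring the lookup performed by GroovyLanguageServer._find_groovy_ls_jar."""
    groovy_settings = ls_specific_settings.get("groovy", {})
    jar_path = groovy_settings.get("ls_jar_path")
    if jar_path and os.path.exists(jar_path):
        return jar_path
    raise RuntimeError("Groovy Language Server JAR not found; set 'ls_jar_path' in serena_config.yml")


# resolve_groovy_jar(settings) raises unless the configured JAR actually exists on disk,
# which matches the behaviour of the real class above.
```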
--------------------------------------------------------------------------------
/test/serena/test_symbol.py:
--------------------------------------------------------------------------------
```python
import pytest

from serena.symbol import LanguageServerSymbolRetriever, NamePathMatcher
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language


class TestSymbolNameMatching:
    def _create_assertion_error_message(
        self,
        name_path_pattern: str,
        symbol_name_path_parts: list[str],
        is_substring_match: bool,
        expected_result: bool,
        actual_result: bool,
    ) -> str:
        """Helper to create a detailed error message for assertions."""
        qnp_repr = "/".join(symbol_name_path_parts)
        return (
            f"Pattern '{name_path_pattern}' (substring: {is_substring_match}) vs "
            f"Qualname parts {symbol_name_path_parts} (as '{qnp_repr}'). "
            f"Expected: {expected_result}, Got: {actual_result}"
        )

    @pytest.mark.parametrize(
        "name_path_pattern, symbol_name_path_parts, is_substring_match, expected",
        [
            # Exact matches, anywhere in the name (is_substring_match=False)
            pytest.param("foo", ["foo"], False, True, id="'foo' matches 'foo' exactly (simple)"),
            pytest.param("foo/", ["foo"], False, True, id="'foo/' matches 'foo' exactly (simple)"),
            pytest.param("foo", ["bar", "foo"], False, True, id="'foo' matches ['bar', 'foo'] exactly (simple, last element)"),
            pytest.param("foo", ["foobar"], False, False, id="'foo' does not match 'foobar' exactly (simple)"),
            pytest.param(
                "foo", ["bar", "foobar"], False, False, id="'foo' does not match ['bar', 'foobar'] exactly (simple, last element)"
            ),
            pytest.param(
                "foo", ["path", "to", "foo"], False, True, id="'foo' matches ['path', 'to', 'foo'] exactly (simple, last element)"
            ),
            # Exact matches, absolute patterns (is_substring_match=False)
            pytest.param("/foo", ["foo"], False, True, id="'/foo' matches ['foo'] exactly (absolute simple)"),
            pytest.param("/foo", ["foo", "bar"], False, False, id="'/foo' does not match ['foo', 'bar'] (absolute simple, len mismatch)"),
            pytest.param("/foo", ["bar"], False, False, id="'/foo' does not match ['bar'] (absolute simple, name mismatch)"),
            pytest.param(
                "/foo", ["bar", "foo"], False, False, id="'/foo' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
            ),
            # Substring matches, anywhere in the name (is_substring_match=True)
            pytest.param("foo", ["foobar"], True, True, id="'foo' matches 'foobar' as substring (simple)"),
            pytest.param("foo", ["bar", "foobar"], True, True, id="'foo' matches ['bar', 'foobar'] as substring (simple, last element)"),
            pytest.param(
                "foo", ["barfoo"], True, True, id="'foo' matches 'barfoo' as substring (simple)"
            ),  # This was potentially ambiguous before
            pytest.param("foo", ["baz"], True, False, id="'foo' does not match 'baz' as substring (simple)"),
            pytest.param("foo", ["bar", "baz"], True, False, id="'foo' does not match ['bar', 'baz'] as substring (simple, last element)"),
            pytest.param("foo", ["my_foobar_func"], True, True, id="'foo' matches 'my_foobar_func' as substring (simple)"),
            pytest.param(
                "foo",
                ["ClassA", "my_foobar_method"],
                True,
                True,
                id="'foo' matches ['ClassA', 'my_foobar_method'] as substring (simple, last element)",
            ),
            pytest.param("foo", ["my_bar_func"], True, False, id="'foo' does not match 'my_bar_func' as substring (simple)"),
            # Substring matches, absolute patterns (is_substring_match=True)
            pytest.param("/foo", ["foobar"], True, True, id="'/foo' matches ['foobar'] as substring (absolute simple)"),
            pytest.param("/foo/", ["foobar"], True, True, id="'/foo/' matches ['foobar'] as substring (absolute simple, last element)"),
            pytest.param("/foo", ["barfoobaz"], True, True, id="'/foo' matches ['barfoobaz'] as substring (absolute simple)"),
            pytest.param(
                "/foo", ["foo", "bar"], True, False, id="'/foo' does not match ['foo', 'bar'] as substring (absolute simple, len mismatch)"
            ),
            pytest.param("/foo", ["bar"], True, False, id="'/foo' does not match ['bar'] (absolute simple, no substr)"),
            pytest.param(
                "/foo", ["bar", "foo"], True, False, id="'/foo' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
            ),
            pytest.param(
                "/foo/", ["bar", "foo"], True, False, id="'/foo/' does not match ['bar', 'foo'] (absolute simple, position mismatch)"
            ),
        ],
    )
    def test_match_simple_name(self, name_path_pattern, symbol_name_path_parts, is_substring_match, expected):
        """Tests matching for simple names (no '/' in pattern)."""
        result = NamePathMatcher(name_path_pattern, is_substring_match).matches_components(symbol_name_path_parts, None)
        error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_parts, is_substring_match, expected, result)
        assert result == expected, error_msg

    @pytest.mark.parametrize(
        "name_path_pattern, symbol_name_path_parts, is_substring_match, expected",
        [
            # --- Relative patterns (suffix matching) ---
            # Exact matches, relative patterns (is_substring_match=False)
            pytest.param("bar/foo", ["bar", "foo"], False, True, id="R: 'bar/foo' matches ['bar', 'foo'] exactly"),
            pytest.param("bar/foo", ["mod", "bar", "foo"], False, True, id="R: 'bar/foo' matches ['mod', 'bar', 'foo'] exactly (suffix)"),
            pytest.param(
                "bar/foo", ["bar", "foo", "baz"], False, False, id="R: 'bar/foo' does not match ['bar', 'foo', 'baz'] (pattern shorter)"
            ),
            pytest.param("bar/foo", ["bar"], False, False, id="R: 'bar/foo' does not match ['bar'] (pattern longer)"),
            pytest.param("bar/foo", ["baz", "foo"], False, False, id="R: 'bar/foo' does not match ['baz', 'foo'] (first part mismatch)"),
            pytest.param("bar/foo", ["bar", "baz"], False, False, id="R: 'bar/foo' does not match ['bar', 'baz'] (last part mismatch)"),
            pytest.param("bar/foo", ["foo"], False, False, id="R: 'bar/foo' does not match ['foo'] (pattern longer)"),
            pytest.param(
                "bar/foo", ["other", "foo"], False, False, id="R: 'bar/foo' does not match ['other', 'foo'] (first part mismatch)"
            ),
            pytest.param(
                "bar/foo", ["bar", "otherfoo"], False, False, id="R: 'bar/foo' does not match ['bar', 'otherfoo'] (last part mismatch)"
            ),
            # Substring matches, relative patterns (is_substring_match=True)
            pytest.param("bar/foo", ["bar", "foobar"], True, True, id="R: 'bar/foo' matches ['bar', 'foobar'] as substring"),
            pytest.param(
                "bar/foo", ["mod", "bar", "foobar"], True, True, id="R: 'bar/foo' matches ['mod', 'bar', 'foobar'] as substring (suffix)"
            ),
            pytest.param("bar/foo", ["bar", "bazfoo"], True, True, id="R: 'bar/foo' matches ['bar', 'bazfoo'] as substring"),
            pytest.param("bar/fo", ["bar", "foo"], True, True, id="R: 'bar/fo' matches ['bar', 'foo'] as substring"),  # codespell:ignore
            pytest.param("bar/foo", ["bar", "baz"], True, False, id="R: 'bar/foo' does not match ['bar', 'baz'] (last no substr)"),
            pytest.param(
                "bar/foo", ["baz", "foobar"], True, False, id="R: 'bar/foo' does not match ['baz', 'foobar'] (first part mismatch)"
            ),
            pytest.param(
                "bar/foo", ["bar", "my_foobar_method"], True, True, id="R: 'bar/foo' matches ['bar', 'my_foobar_method'] as substring"
            ),
            pytest.param(
                "bar/foo",
                ["mod", "bar", "my_foobar_method"],
                True,
                True,
                id="R: 'bar/foo' matches ['mod', 'bar', 'my_foobar_method'] as substring (suffix)",
            ),
            pytest.param(
                "bar/foo",
                ["bar", "another_method"],
                True,
                False,
                id="R: 'bar/foo' does not match ['bar', 'another_method'] (last no substr)",
            ),
            pytest.param(
                "bar/foo",
                ["other", "my_foobar_method"],
                True,
                False,
                id="R: 'bar/foo' does not match ['other', 'my_foobar_method'] (first part mismatch)",
            ),
            pytest.param("bar/f", ["bar", "foo"], True, True, id="R: 'bar/f' matches ['bar', 'foo'] as substring"),
            # Exact matches, absolute patterns (is_substring_match=False)
            pytest.param("/bar/foo", ["bar", "foo"], False, True, id="A: '/bar/foo' matches ['bar', 'foo'] exactly"),
            pytest.param(
                "/bar/foo", ["bar", "foo", "baz"], False, False, id="A: '/bar/foo' does not match ['bar', 'foo', 'baz'] (pattern shorter)"
            ),
            pytest.param("/bar/foo", ["bar"], False, False, id="A: '/bar/foo' does not match ['bar'] (pattern longer)"),
            pytest.param("/bar/foo", ["baz", "foo"], False, False, id="A: '/bar/foo' does not match ['baz', 'foo'] (first part mismatch)"),
            pytest.param("/bar/foo", ["bar", "baz"], False, False, id="A: '/bar/foo' does not match ['bar', 'baz'] (last part mismatch)"),
            # Substring matches (is_substring_match=True)
            pytest.param("/bar/foo", ["bar", "foobar"], True, True, id="A: '/bar/foo' matches ['bar', 'foobar'] as substring"),
            pytest.param("/bar/foo", ["bar", "bazfoo"], True, True, id="A: '/bar/foo' matches ['bar', 'bazfoo'] as substring"),
            pytest.param("/bar/fo", ["bar", "foo"], True, True, id="A: '/bar/fo' matches ['bar', 'foo'] as substring"),  # codespell:ignore
            pytest.param("/bar/foo", ["bar", "baz"], True, False, id="A: '/bar/foo' does not match ['bar', 'baz'] (last no substr)"),
            pytest.param(
                "/bar/foo", ["baz", "foobar"], True, False, id="A: '/bar/foo' does not match ['baz', 'foobar'] (first part mismatch)"
            ),
        ],
    )
    def test_match_name_path_pattern_path_len_2(self, name_path_pattern, symbol_name_path_parts, is_substring_match, expected):
        """Tests matching for qualified names (e.g. 'module/class/func')."""
        result = NamePathMatcher(name_path_pattern, is_substring_match).matches_components(symbol_name_path_parts, None)
        error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_parts, is_substring_match, expected, result)
        assert result == expected, error_msg

    @pytest.mark.parametrize(
        "name_path_pattern, symbol_name_path_parts, symbol_overload_idx, expected",
        [
            pytest.param("bar/foo", ["bar", "foo"], 0, True, id="R: 'bar/foo' matches ['bar', 'foo'] with overload_index=0"),
            pytest.param("bar/foo", ["bar", "foo"], 1, True, id="R: 'bar/foo' matches ['bar', 'foo'] with overload_index=1"),
            pytest.param("bar/foo[0]", ["bar", "foo"], 0, True, id="R: 'bar/foo[0]' matches ['bar', 'foo'] with overload_index=0"),
            pytest.param("bar/foo[1]", ["bar", "foo"], 0, False, id="R: 'bar/foo[1]' does not match ['bar', 'foo'] with overload_index=0"),
        ],
    )
    def test_match_name_path_pattern_with_overload_idx(self, name_path_pattern, symbol_name_path_parts, symbol_overload_idx, expected):
        """Tests matching for qualified names (e.g. 'module/class/func')."""
        matcher = NamePathMatcher(name_path_pattern, False)
        result = matcher.matches_components(symbol_name_path_parts, symbol_overload_idx)
        error_msg = self._create_assertion_error_message(name_path_pattern, symbol_name_path_parts, False, expected, result)
        assert result == expected, error_msg


@pytest.mark.python
class TestLanguageServerSymbolRetriever:
    @pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
    def test_request_info(self, language_server: SolidLanguageServer):
        symbol_retriever = LanguageServerSymbolRetriever(language_server)
        create_user_method_symbol = symbol_retriever.find("UserService/create_user", within_relative_path="test_repo/services.py")[0]
        create_user_method_symbol_info = symbol_retriever.request_info_for_symbol(create_user_method_symbol)
        assert "Create a new user and store it" in create_user_method_symbol_info
```
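
To make the matching rules exercised above easier to scan, here is a small standalone usage sketch of the same `NamePathMatcher` API: relative patterns match a suffix of the symbol's name path, a leading `/` anchors the whole path, and a `[n]` suffix selects an overload index. The import path and call signature follow the test file; the concrete patterns are just illustrative picks from the parametrized cases.

```python
from serena.symbol import NamePathMatcher

# Relative pattern: matches any symbol whose name path ends with bar/foo.
assert NamePathMatcher("bar/foo", False).matches_components(["mod", "bar", "foo"], None)

# Absolute pattern: the whole name path must be bar/foo, so an extra leading segment fails.
assert not NamePathMatcher("/bar/foo", False).matches_components(["mod", "bar", "foo"], None)

# Substring mode: the last segment only needs to contain the pattern's last part.
assert NamePathMatcher("bar/foo", True).matches_components(["bar", "my_foobar_method"], None)

# Overload selector: bar/foo[1] does not match the symbol with overload index 0.
assert not NamePathMatcher("bar/foo[1]", False).matches_components(["bar", "foo"], 0)
```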
--------------------------------------------------------------------------------
/test/resources/repos/python/test_repo/custom_test/advanced_features.py:
--------------------------------------------------------------------------------
```python
"""
Advanced Python features for testing code parsing capabilities.
This module contains various advanced Python code patterns to ensure
that the code parser can correctly handle them.
"""
from __future__ import annotations
import asyncio
import os
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterable
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum, Flag, IntEnum, auto
from functools import wraps
from typing import (
Annotated,
Any,
ClassVar,
Final,
Generic,
Literal,
NewType,
Protocol,
TypedDict,
TypeVar,
)
# Type variables for generics
T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")
# Custom types using NewType
UserId = NewType("UserId", str)
ItemId = NewType("ItemId", int)
# Type aliases
PathLike = str | os.PathLike
JsonDict = dict[str, Any]
# TypedDict
class UserDict(TypedDict):
"""TypedDict representing user data."""
id: str
name: str
email: str
age: int
roles: list[str]
# Enums
class Status(Enum):
"""Status enum for process states."""
PENDING = "pending"
RUNNING = "running"
COMPLETED = "completed"
FAILED = "failed"
class Priority(IntEnum):
"""Priority levels for tasks."""
LOW = 0
MEDIUM = 5
HIGH = 10
CRITICAL = auto()
class Permissions(Flag):
"""Permission flags for access control."""
NONE = 0
READ = 1
WRITE = 2
EXECUTE = 4
ALL = READ | WRITE | EXECUTE
# Abstract class with various method types
class BaseProcessor(ABC):
"""Abstract base class for processors with various method patterns."""
# Class variable with type annotation
DEFAULT_TIMEOUT: ClassVar[int] = 30
MAX_RETRIES: Final[int] = 3
def __init__(self, name: str, config: dict[str, Any] | None = None):
self.name = name
self.config = config or {}
self._status = Status.PENDING
@property
def status(self) -> Status:
"""Status property getter."""
return self._status
@status.setter
def status(self, value: Status) -> None:
"""Status property setter."""
if not isinstance(value, Status):
raise TypeError(f"Expected Status enum, got {type(value)}")
self._status = value
@abstractmethod
def process(self, data: Any) -> Any:
"""Process the input data."""
@classmethod
def create_from_config(cls, config: dict[str, Any]) -> BaseProcessor:
"""Factory classmethod."""
name = config.get("name", "default")
return cls(name=name, config=config)
@staticmethod
def validate_config(config: dict[str, Any]) -> bool:
"""Static method for config validation."""
return "name" in config
def __str__(self) -> str:
return f"{self.__class__.__name__}(name={self.name})"
# Concrete implementation of abstract class
class DataProcessor(BaseProcessor):
"""Concrete implementation of BaseProcessor."""
def __init__(self, name: str, config: dict[str, Any] | None = None, priority: Priority = Priority.MEDIUM):
super().__init__(name, config)
self.priority = priority
self.processed_count = 0
def process(self, data: Any) -> Any:
"""Process the data."""
# Nested function definition
def transform(item: Any) -> Any:
# Nested function within a nested function
def apply_rules(x: Any) -> Any:
return x
return apply_rules(item)
# Lambda function
normalize = lambda x: x / max(x) if hasattr(x, "__iter__") and len(x) > 0 else x # noqa: F841
result = transform(data)
self.processed_count += 1
return result
# Method with complex type hints
def batch_process(self, items: list[str | dict[str, Any] | tuple[Any, ...]]) -> dict[str, list[Any]]:
"""Process multiple items in a batch."""
results: dict[str, list[Any]] = {"success": [], "error": []}
for item in items:
try:
result = self.process(item)
results["success"].append(result)
except Exception as e:
results["error"].append((item, str(e)))
return results
# Generator method
def process_stream(self, data_stream: Iterable[T]) -> Iterable[T]:
"""Process a stream of data, yielding results as they're processed."""
for item in data_stream:
yield self.process(item)
# Async method
async def async_process(self, data: Any) -> Any:
"""Process data asynchronously."""
await asyncio.sleep(0.1)
return self.process(data)
# Method with function parameters
def apply_transform(self, data: Any, transform_func: Callable[[Any], Any]) -> Any:
"""Apply a custom transform function to the data."""
return transform_func(data)
# Dataclass
@dataclass
class Task:
"""Task dataclass for tracking work items."""
id: str
name: str
status: Status = Status.PENDING
priority: Priority = Priority.MEDIUM
metadata: dict[str, Any] = field(default_factory=dict)
dependencies: list[str] = field(default_factory=list)
created_at: float | None = None
def __post_init__(self):
if self.created_at is None:
import time
self.created_at = time.time()
def has_dependencies(self) -> bool:
"""Check if task has dependencies."""
return len(self.dependencies) > 0
# Generic class
class Repository(Generic[T]):
"""Generic repository for managing collections of items."""
def __init__(self):
self.items: dict[str, T] = {}
def add(self, id: str, item: T) -> None:
"""Add an item to the repository."""
self.items[id] = item
def get(self, id: str) -> T | None:
"""Get an item by id."""
return self.items.get(id)
def remove(self, id: str) -> bool:
"""Remove an item by id."""
if id in self.items:
del self.items[id]
return True
return False
def list_all(self) -> list[T]:
"""List all items."""
return list(self.items.values())
# Type with Protocol (structural subtyping)
class Serializable(Protocol):
"""Protocol for objects that can be serialized to dict."""
def to_dict(self) -> dict[str, Any]: ...
#
# Decorator function
def log_execution(func: Callable) -> Callable:
"""Decorator to log function execution."""
@wraps(func)
def wrapper(*args, **kwargs):
print(f"Executing {func.__name__}")
result = func(*args, **kwargs)
print(f"Finished {func.__name__}")
return result
return wrapper
# Context manager
@contextmanager
def transaction_context(name: str = "default"):
"""Context manager for transaction-like operations."""
print(f"Starting transaction: {name}")
try:
yield name
print(f"Committing transaction: {name}")
except Exception as e:
print(f"Rolling back transaction: {name}, error: {e}")
raise
# Function with complex parameter annotations
def advanced_search(
query: str,
filters: dict[str, Any] | None = None,
sort_by: str | None = None,
sort_order: Literal["asc", "desc"] = "asc",
page: int = 1,
page_size: int = 10,
include_metadata: bool = False,
) -> tuple[list[dict[str, Any]], int]:
"""
Advanced search function with many parameters.
Returns search results and total count.
"""
results = []
total = 0
# Simulating search functionality
return results, total
# Class with nested classes
class OuterClass:
"""Outer class with nested classes and methods."""
class NestedClass:
"""Nested class inside OuterClass."""
def __init__(self, value: Any):
self.value = value
def get_value(self) -> Any:
"""Get the stored value."""
return self.value
class DeeplyNestedClass:
"""Deeply nested class for testing parser depth capabilities."""
def deep_method(self) -> str:
"""Method in deeply nested class."""
return "deep"
def __init__(self, name: str):
self.name = name
self.nested = self.NestedClass(name)
def get_nested(self) -> NestedClass:
"""Get the nested class instance."""
return self.nested
# Method with nested functions
def process_with_nested(self, data: Any) -> Any:
"""Method demonstrating deeply nested function definitions."""
def level1(x: Any) -> Any:
"""First level nested function."""
def level2(y: Any) -> Any:
"""Second level nested function."""
def level3(z: Any) -> Any:
"""Third level nested function."""
return z
return level3(y)
return level2(x)
return level1(data)
# Metaclass example
class Meta(type):
"""Metaclass example for testing advanced class handling."""
def __new__(mcs, name, bases, attrs):
print(f"Creating class: {name}")
return super().__new__(mcs, name, bases, attrs)
def __init__(cls, name, bases, attrs):
print(f"Initializing class: {name}")
super().__init__(name, bases, attrs)
class WithMeta(metaclass=Meta):
"""Class that uses a metaclass."""
def __init__(self, value: str):
self.value = value
# Factory function that creates and returns instances
def create_processor(processor_type: str, name: str, config: dict[str, Any] | None = None) -> BaseProcessor:
"""Factory function that creates and returns processor instances."""
if processor_type == "data":
return DataProcessor(name, config)
else:
raise ValueError(f"Unknown processor type: {processor_type}")
# Nested decorator example
def with_retry(max_retries: int = 3):
"""Decorator factory that creates a retry decorator."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for attempt in range(max_retries):
try:
return func(*args, **kwargs)
except Exception as e:
if attempt == max_retries - 1:
raise
print(f"Retrying {func.__name__} after error: {e}")
return None
return wrapper
return decorator
@with_retry(max_retries=5)
def unreliable_operation(data: Any) -> Any:
"""Function that might fail and uses the retry decorator."""
import random
if random.random() < 0.5:
raise RuntimeError("Random failure")
return data
# Complex type annotation with Annotated
ValidatedString = Annotated[str, "A string that has been validated"]
PositiveInt = Annotated[int, lambda x: x > 0]
def process_validated_data(data: ValidatedString, count: PositiveInt) -> list[str]:
"""Process data with Annotated type hints."""
return [data] * count
# Example of forward references and string literals in type annotations
class TreeNode:
"""Tree node with forward reference to itself in annotations."""
def __init__(self, value: Any):
self.value = value
self.children: list[TreeNode] = []
def add_child(self, child: TreeNode) -> None:
"""Add a child node."""
self.children.append(child)
def traverse(self) -> list[Any]:
"""Traverse the tree and return all values."""
result = [self.value]
for child in self.children:
result.extend(child.traverse())
return result
# Main entry point for demonstration
def main() -> None:
"""Main function demonstrating the use of various features."""
# Create processor
processor = DataProcessor("test-processor", {"debug": True})
# Create tasks
task1 = Task(id="task1", name="First Task")
task2 = Task(id="task2", name="Second Task", dependencies=["task1"])
# Create repository
repo: Repository[Task] = Repository()
repo.add(task1.id, task1)
repo.add(task2.id, task2)
# Process some data
data = [1, 2, 3, 4, 5]
result = processor.process(data) # noqa: F841
# Use context manager
with transaction_context("main"):
# Process more data
for task in repo.list_all():
processor.process(task.name)
# Use advanced search
_results, _total = advanced_search(query="test", filters={"status": Status.PENDING}, sort_by="priority", page=1, include_metadata=True)
# Create a tree
root = TreeNode("root")
child1 = TreeNode("child1")
child2 = TreeNode("child2")
root.add_child(child1)
root.add_child(child2)
child1.add_child(TreeNode("grandchild1"))
print("Done!")
if __name__ == "__main__":
main()
```
--------------------------------------------------------------------------------
/src/solidlsp/language_servers/taplo_server.py:
--------------------------------------------------------------------------------
```python
"""
Provides TOML specific instantiation of the LanguageServer class using Taplo.
Contains various configurations and settings specific to TOML files.
"""
import gzip
import hashlib
import logging
import os
import platform
import shutil
import socket
import stat
import threading
import urllib.request
from typing import Any
# Download timeout in seconds (prevents indefinite hangs)
DOWNLOAD_TIMEOUT_SECONDS = 120
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import PathUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
# Taplo release version and download URLs
TAPLO_VERSION = "0.10.0"
TAPLO_DOWNLOAD_BASE = f"https://github.com/tamasfe/taplo/releases/download/{TAPLO_VERSION}"
# SHA256 checksums for Taplo releases (verified from official GitHub releases)
# Source: https://github.com/tamasfe/taplo/releases/tag/0.10.0
# To update: download each release file and run: sha256sum <filename>
TAPLO_SHA256_CHECKSUMS: dict[str, str] = {
"taplo-windows-x86_64.zip": "1615eed140039bd58e7089109883b1c434de5d6de8f64a993e6e8c80ca57bdf9",
"taplo-windows-x86.zip": "b825701daab10dcfc0251e6d668cd1a9c0e351e7f6762dd20844c3f3f3553aa0",
"taplo-darwin-x86_64.gz": "898122cde3a0b1cd1cbc2d52d3624f23338218c91b5ddb71518236a4c2c10ef2",
"taplo-darwin-aarch64.gz": "713734314c3e71894b9e77513c5349835eefbd52908445a0d73b0c7dc469347d",
"taplo-linux-x86_64.gz": "8fe196b894ccf9072f98d4e1013a180306e17d244830b03986ee5e8eabeb6156",
"taplo-linux-aarch64.gz": "033681d01eec8376c3fd38fa3703c79316f5e14bb013d859943b60a07bccdcc3",
"taplo-linux-armv7.gz": "6b728896afe2573522f38b8e668b1ff40eb5928fd9d6d0c253ecae508274d417",
}
def _verify_sha256(file_path: str, expected_hash: str) -> bool:
"""Verify SHA256 checksum of a downloaded file."""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(8192), b""):
sha256_hash.update(chunk)
actual_hash = sha256_hash.hexdigest()
return actual_hash.lower() == expected_hash.lower()
def _get_taplo_download_url() -> tuple[str, str]:
"""
Get the appropriate Taplo download URL for the current platform.
Returns:
Tuple of (download_url, executable_name)
"""
system = platform.system().lower()
machine = platform.machine().lower()
# Map machine architecture to Taplo naming convention
arch_map = {
"x86_64": "x86_64",
"amd64": "x86_64",
"x86": "x86",
"i386": "x86",
"i686": "x86",
"aarch64": "aarch64",
"arm64": "aarch64",
"armv7l": "armv7",
}
arch = arch_map.get(machine, "x86_64") # Default to x86_64
if system == "windows":
filename = f"taplo-windows-{arch}.zip"
executable = "taplo.exe"
elif system == "darwin":
filename = f"taplo-darwin-{arch}.gz"
executable = "taplo"
else: # Linux and others
filename = f"taplo-linux-{arch}.gz"
executable = "taplo"
return f"{TAPLO_DOWNLOAD_BASE}/{filename}", executable
class TaploServer(SolidLanguageServer):
"""
Provides TOML specific instantiation of the LanguageServer class using Taplo.
Taplo is a TOML toolkit with LSP support for validation, formatting, and schema support.
"""
@staticmethod
def _determine_log_level(line: str) -> int:
"""Classify Taplo stderr output to avoid false-positive errors."""
line_lower = line.lower()
# Known informational messages from Taplo
if any(
[
"schema" in line_lower and "not found" in line_lower,
"warning" in line_lower,
]
):
return logging.DEBUG
return SolidLanguageServer._determine_log_level(line)
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
"""
Creates a TaploServer instance. This class is not meant to be instantiated directly.
Use LanguageServer.create() instead.
"""
taplo_executable_path = self._setup_runtime_dependencies(solidlsp_settings)
super().__init__(
config,
repository_root_path,
ProcessLaunchInfo(cmd=f"{taplo_executable_path} lsp stdio", cwd=repository_root_path),
"toml",
solidlsp_settings,
)
self.server_ready = threading.Event()
@classmethod
def _setup_runtime_dependencies(cls, solidlsp_settings: SolidLSPSettings) -> str:
"""
Setup runtime dependencies for Taplo and return the command to start the server.
"""
# First check if taplo is already installed system-wide
system_taplo = shutil.which("taplo")
if system_taplo:
log.info(f"Using system-installed Taplo at: {system_taplo}")
return system_taplo
# Setup local installation directory
taplo_dir = os.path.join(cls.ls_resources_dir(solidlsp_settings), "taplo")
os.makedirs(taplo_dir, exist_ok=True)
_, executable_name = _get_taplo_download_url()
taplo_executable = os.path.join(taplo_dir, executable_name)
if os.path.exists(taplo_executable) and os.access(taplo_executable, os.X_OK):
log.info(f"Using cached Taplo at: {taplo_executable}")
return taplo_executable
# Download and install Taplo
log.info(f"Taplo not found. Downloading version {TAPLO_VERSION}...")
cls._download_taplo(taplo_dir, taplo_executable)
if not os.path.exists(taplo_executable):
raise FileNotFoundError(
f"Taplo executable not found at {taplo_executable}. "
"Installation may have failed. Try installing manually: cargo install taplo-cli --locked"
)
return taplo_executable
@classmethod
def _download_taplo(cls, install_dir: str, executable_path: str) -> None:
"""Download and extract Taplo binary with SHA256 verification."""
# TODO: consider using existing download utilities in SolidLSP instead of the custom logic here
download_url, _ = _get_taplo_download_url()
archive_filename = os.path.basename(download_url)
try:
log.info(f"Downloading Taplo from: {download_url}")
archive_path = os.path.join(install_dir, archive_filename)
# Download the archive with timeout to prevent indefinite hangs
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(DOWNLOAD_TIMEOUT_SECONDS)
urllib.request.urlretrieve(download_url, archive_path)
finally:
socket.setdefaulttimeout(old_timeout)
# Verify SHA256 checksum
expected_hash = TAPLO_SHA256_CHECKSUMS.get(archive_filename)
if expected_hash:
if not _verify_sha256(archive_path, expected_hash):
os.remove(archive_path)
raise RuntimeError(
f"SHA256 checksum verification failed for {archive_filename}. "
"The downloaded file may be corrupted or tampered with. "
"Try installing manually: cargo install taplo-cli --locked"
)
log.info(f"SHA256 checksum verified for {archive_filename}")
else:
log.warning(
f"No SHA256 checksum available for {archive_filename}. "
"Skipping verification - consider installing manually: cargo install taplo-cli --locked"
)
# Extract based on format
if archive_path.endswith(".gz") and not archive_path.endswith(".tar.gz"):
# Single file gzip
with gzip.open(archive_path, "rb") as f_in:
with open(executable_path, "wb") as f_out:
f_out.write(f_in.read())
elif archive_path.endswith(".zip"):
import zipfile
with zipfile.ZipFile(archive_path, "r") as zip_ref:
# Security: Validate paths to prevent zip slip vulnerability
for member in zip_ref.namelist():
member_path = os.path.normpath(os.path.join(install_dir, member))
if not member_path.startswith(os.path.normpath(install_dir)):
raise RuntimeError(f"Zip slip detected: {member} attempts to escape install directory")
zip_ref.extractall(install_dir)
# Make executable on Unix systems
if os.name != "nt":
os.chmod(executable_path, os.stat(executable_path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Clean up archive
os.remove(archive_path)
log.info(f"Taplo installed successfully at: {executable_path}")
except Exception as e:
log.error(f"Failed to download Taplo: {e}")
raise RuntimeError(
f"Failed to download Taplo from {download_url}. Try installing manually: cargo install taplo-cli --locked"
) from e
@staticmethod
def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
"""
Returns the initialize params for the Taplo Language Server.
"""
root_uri = PathUtils.path_to_uri(repository_absolute_path)
initialize_params = {
"locale": "en",
"capabilities": {
"textDocument": {
"synchronization": {"didSave": True, "dynamicRegistration": True},
"completion": {"dynamicRegistration": True, "completionItem": {"snippetSupport": True}},
"definition": {"dynamicRegistration": True},
"references": {"dynamicRegistration": True},
"documentSymbol": {
"dynamicRegistration": True,
"hierarchicalDocumentSymbolSupport": True,
"symbolKind": {"valueSet": list(range(1, 27))},
},
"hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
"codeAction": {"dynamicRegistration": True},
},
"workspace": {
"workspaceFolders": True,
"didChangeConfiguration": {"dynamicRegistration": True},
"symbol": {"dynamicRegistration": True},
},
},
"processId": os.getpid(),
"rootPath": repository_absolute_path,
"rootUri": root_uri,
"workspaceFolders": [
{
"uri": root_uri,
"name": os.path.basename(repository_absolute_path),
}
],
}
return initialize_params # type: ignore
def _start_server(self) -> None:
"""
Starts the Taplo Language Server and initializes it.
"""
def register_capability_handler(params: Any) -> None:
return
def do_nothing(params: Any) -> None:
return
def window_log_message(msg: dict) -> None:
log.info(f"LSP: window/logMessage: {msg}")
self.server.on_request("client/registerCapability", register_capability_handler)
self.server.on_notification("window/logMessage", window_log_message)
self.server.on_notification("$/progress", do_nothing)
self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
log.info("Starting Taplo server process")
self.server.start()
initialize_params = self._get_initialize_params(self.repository_root_path)
log.info("Sending initialize request to Taplo server")
init_response = self.server.send.initialize(initialize_params)
log.debug(f"Received initialize response from Taplo: {init_response}")
# Verify document symbol support
capabilities = init_response.get("capabilities", {})
if capabilities.get("documentSymbolProvider"):
log.info("Taplo server supports document symbols")
else:
log.warning("Taplo server may have limited document symbol support")
self.server.notify.initialized({})
log.info("Taplo server initialization complete")
self.server_ready.set()
self.completions_available.set()
def is_ignored_dirname(self, dirname: str) -> bool:
"""Define TOML-specific directories to ignore."""
return super().is_ignored_dirname(dirname) or dirname in ["target", ".cargo", "node_modules"]
```
--------------------------------------------------------------------------------
/docs/02-usage/030_clients.md:
--------------------------------------------------------------------------------
```markdown
# Connecting Your MCP Client
In the following, we provide general instructions on how to connect Serena to your MCP-enabled client,
as well as specific instructions for popular clients.
:::{note}
The configurations we provide for particular clients below will run the latest version of Serena
using the `stdio` protocol with `uvx`.
Adapt the commands to your preferred way of [running Serena](020_running), adding any additional
command-line arguments as needed.
:::
(clients-general-instructions)=
## General Instructions
In general, Serena can be used with any MCP-enabled client.
To connect Serena to your favourite client, simply
1. determine how to add a custom MCP server to your client (refer to the client's documentation).
2. add a new MCP server entry by specifying either
* a [run command](start-mcp-server) that allows the client to start the MCP server in stdio mode as a subprocess, or
* the URL of the HTTP/SSE endpoint, having started the [Serena MCP server in HTTP/SSE mode](streamable-http) beforehand.
Find concrete examples for popular clients below.
Depending on your needs, you might want to further customize Serena's behaviour by
* [adding command-line arguments](mcp-args)
* [adjusting configuration](050_configuration).
**Mode of Operation**.
Note that some clients have a per-workspace MCP configuration (e.g., VSCode and Claude Code),
while others have a global MCP configuration (e.g. Codex and Claude Desktop).
- In the per-workspace case, you typically want to start Serena with your workspace directory as the project directory
and never switch to a different project. This is achieved by specifying the
`--project <path>` argument with a single-project [context](#contexts) (e.g. `ide` or `claude-code`), as sketched below.
- In the global configuration case, you must first activate the project you want to work on, which you can do by asking
the LLM to do so (e.g., "Activate the current dir as project using serena"). In such settings, the `activate_project`
tool is required.
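For illustration, a per-workspace entry for a JSON-based client might look roughly as follows (a minimal sketch; the project path is a placeholder, and concrete, client-specific examples follow below):
```json
{
  "mcpServers": {
    "serena": {
      "command": "uvx",
      "args": [
        "--from", "git+https://github.com/oraios/serena",
        "serena", "start-mcp-server",
        "--context", "ide",
        "--project", "/path/to/your/workspace"
      ]
    }
  }
}
```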
**Tool Selection**.
While you may be able to turn off tools through your client's interface (e.g., in VSCode or Claude Desktop),
we recommend selecting your base tool set through Serena's configuration, as Serena's prompts automatically
adjust based on which tools are enabled/disabled.
A key mechanism for this is to use the appropriate [context](#contexts) when starting Serena.
(clients-common-pitfalls)=
### Common Pitfalls
**Escaping Paths Correctly**.
Note that if your client configuration uses JSON, special characters (like backslashes) need to be escaped properly.
In particular, if you are specifying paths containing backslashes on Windows
(note that you can also just use forward slashes), be sure to escape them correctly (`\\`).
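For example, a Windows path in a JSON value would be written with doubled backslashes (the path below is just a placeholder):
```
"--project", "C:\\Users\\me\\projects\\my-project"
```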
**Discoverability of `uvx`**.
Your client may not find the `uvx` command, even if it is on your system PATH.
In this case, a workaround is to provide the full path to the `uvx` executable.
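For instance, in a JSON-based configuration you could replace the bare command with an absolute path (the location below is just an example; you can find yours with `which uvx` or, on Windows, `where uvx`):
```
"command": "/home/me/.local/bin/uvx"
```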
**Environment Variables**.
Some language servers may require additional environment variables to be set (e.g. F# on macOS with Homebrew),
which you may need to explicitly add to the MCP server configuration.
Note that for some clients (e.g. Claude Desktop), the spawned MCP server process may not inherit environment variables that
are only configured in your shell profile (e.g. `.bashrc`, `.zshrc`, etc.); they would need to be set system-wide instead.
An easy fix is to add them explicitly to the MCP server entry.
For example, in Claude Desktop and other clients, you can simply add an `env` key to the `serena`
object, e.g.
```
"env": {
"DOTNET_ROOT": "/opt/homebrew/Cellar/dotnet/9.0.8/libexec"
}
```
## Claude Code
Serena is a great way to make Claude Code both cheaper and more powerful!
**Per-Project Configuration.** To add the Serena MCP server to the current project in the current directory,
use this command:
```shell
claude mcp add serena -- uvx --from git+https://github.com/oraios/serena serena start-mcp-server --context claude-code --project "$(pwd)"
```
Note:
* We use the `claude-code` context to disable unnecessary tools (avoiding duplication
with Claude Code's built-in capabilities).
* We specify the current directory as the project directory with `--project "$(pwd)"`, such
that Serena is configured to work on the current project from the get-go, following
Claude Code's mode of operation.
**Global Configuration**. Alternatively, use `--project-from-cwd` for user-level configuration that works across all projects:
```shell
claude mcp add --scope user serena -- uvx --from git+https://github.com/oraios/serena serena start-mcp-server --context=claude-code --project-from-cwd
```
Whenever you start Claude Code, Serena will search upward from the current directory for a `.serena/project.yml` or `.git` marker and activate the corresponding directory as the project; if neither is found, the current directory itself is activated.
This mechanism makes it suitable for a single global MCP configuration.
**Maximum Token Efficiency.** To maximize token efficiency, you may want to use Claude Code's
*on-demand tool loading* feature, which is supported since at least v2.0.74 of Claude Code.
This feature avoids sending all tool descriptions to Claude upon startup, thus saving tokens.
Instead, Claude will search for tools as needed (but there are no guarantees that it will
search optimally, of course).
To enable this feature, set the environment variable `ENABLE_TOOL_SEARCH=true`.
Depending on your shell, you can also set this on a per-session basis, e.g. using
```shell
ENABLE_TOOL_SEARCH=true claude
```
in bash/zsh, or using
```shell
set ENABLE_TOOL_SEARCH=true && claude
```
in Windows CMD to launch Claude Code.
## VSCode
While Serena can be installed directly from the GitHub MCP server registry, we recommend setting it up manually
(at least for now, until the configuration there has been improved). Just paste the following into
`<your_project>/.vscode/mcp.json`, or edit the entry after using the `install into workspace` option:
```json
{
"servers": {
"oraios/serena": {
"type": "stdio",
"command": "uvx",
"args": [
"--from",
"git+https://github.com/oraios/serena",
"serena",
"start-mcp-server",
"--context",
"ide",
"--project",
"${workspaceFolder}"
]
}
},
"inputs": []
}
```
## Codex
Serena works with OpenAI's Codex CLI out of the box, but you have to use the `codex` context for it to work properly. (The technical reason is that Codex does not fully support the MCP specification, so some massaging of tools is required.)
Add a [run command](020_running) to `~/.codex/config.toml` to configure Serena for all Codex sessions;
create the file if it does not exist.
For example, when using `uvx`, add the following section:
```toml
[mcp_servers.serena]
command = "uvx"
args = ["--from", "git+https://github.com/oraios/serena", "serena", "start-mcp-server", "--context", "codex"]
```
After Codex has started, you need to activate the project, which you can do by saying:
"Activate the current dir as project using serena"
> If you don't activate the project, you will not be able to use Serena's tools!
That's it! Have a look at `~/.codex/log/codex-tui.log` to see if any errors occurred.
Serena's dashboard will run if you have not disabled it in the configuration, but due to Codex's sandboxing, the web browser
may not open automatically. You can open it manually by going to `http://localhost:24282/dashboard/index.html` (or a higher port, if
that was already taken).
> Codex will often show the tools as `failed` even though they were executed successfully. This is not a problem; it appears to be a bug in Codex. Despite the error message, everything works as expected.
## Claude Desktop
On Windows and macOS, there are official [Claude Desktop applications by Anthropic](https://claude.ai/download); for Linux, there is an [open-source
community version](https://github.com/aaddrick/claude-desktop-debian).
To configure MCP server settings, go to File / Settings / Developer / MCP Servers / Edit Config,
which will let you open the JSON file `claude_desktop_config.json`.
Add the `serena` MCP server configuration:
```json
{
"mcpServers": {
"serena": {
"command": "uvx",
"args": [
"--from",
"git+https://github.com/oraios/serena",
"serena",
"start-mcp-server"
]
}
}
}
```
If your language server requires specific environment variables to be set (e.g. F# on macOS with Homebrew),
you can add them via an `env` key (see [above](#clients-common-pitfalls)).
Once you have created the new MCP server entry, save the config and then restart Claude Desktop.
:::{attention}
Be sure to fully quit the Claude Desktop application via File / Exit, as regularly closing the application will just
minimize it.
:::
After restarting, you should see Serena's tools in your chat interface (notice the small hammer icon).
For more information on MCP servers with Claude Desktop,
see [the official quick start guide](https://modelcontextprotocol.io/quickstart/user).
## JetBrains Junie
Open Junie, go to the three dots in the top right corner, then Settings / MCP Settings and add Serena to Junie's global
MCP server configuration:
```json
{
"mcpServers": {
"serena": {
"command": "uvx",
"args": [
"--from",
"git+https://github.com/oraios/serena",
"serena",
"start-mcp-server",
"--context",
"ide"
]
}
}
}
```
You will have to prompt Junie to "Activate the current project using serena's activation tool" at the
start of each session.
## JetBrains AI Assistant
Here you can set up the more convenient per-project MCP server configuration, as the AI assistant supports specifying
the launch working directory.
Go to Settings / Tools / AI Assistant / MCP and add a new **local** configuration via the `as JSON` option:
```json
{
"mcpServers": {
"serena": {
"command": "uvx",
"args": [
"--from",
"git+https://github.com/oraios/serena",
"serena",
"start-mcp-server",
"--context",
"ide",
"--project",
"$(pwd)"
]
}
}
}
```
Then make sure to configure the working directory to be the project root.
## Antigravity
:::{warning}
At the time of writing (12/2025), Antigravity does not seem to work with Serena due to schema validation issues
which are beyond our control.
The client starts Serena but then crashes with `[internal] marshal message: string field contains invalid UTF-8`.
Nevertheless, we provide a configuration that should work once the issue is resolved.
:::
Add this configuration:
```json
{
"mcpServers": {
"serena": {
"command": "uvx",
"args": [
"--from",
"git+https://github.com/oraios/serena",
"serena",
"start-mcp-server",
"--context",
"ide"
]
}
}
}
```
## Other Clients
For other clients, follow the [general instructions](#clients-general-instructions) above to set up Serena as an MCP server.
### Terminal-Based Clients
There are many terminal-based coding assistants that support MCP servers, such as
* [Gemini-CLI](https://github.com/google-gemini/gemini-cli),
* [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder),
* [rovodev](https://community.atlassian.com/forums/Rovo-for-Software-Teams-Beta/Introducing-Rovo-Dev-CLI-AI-Powered-Development-in-your-terminal/ba-p/3043623),
* [OpenHands CLI](https://docs.all-hands.dev/usage/how-to/cli-mode) and
* [opencode](https://github.com/sst/opencode).
They generally benefit from the symbolic tools provided by Serena. You might want to customize some aspects of Serena
by writing your own contexts, modes, or prompts to adjust it to the respective client's internal capabilities (and your general workflow).
In most cases, the `ide` context is likely to be appropriate for such clients, i.e. add `--context ide` to the run command
in order to reduce tool duplication.
### MCP-Enabled IDEs and Coding Clients (Cline, Roo-Code, Cursor, Windsurf, etc.)
Most of the popular coding assistants (e.g. IDE extensions) and AI-enabled IDEs support connections
to MCP servers. Serena generally boosts performance by providing efficient tools for symbolic operations.
We generally recommend using the `ide` context for these integrations, i.e. adding `--context ide`
in order to reduce tool duplication.
### Local GUIs and Agent Frameworks
Over the last few months, several technologies have emerged that allow you to run a local GUI client
and connect it to an MCP server. The respective applications will typically work with Serena out of the box.
Some of the leading open source GUI applications are
* [Jan](https://jan.ai/docs/mcp),
* [OpenHands](https://github.com/All-Hands-AI/OpenHands/),
* [OpenWebUI](https://docs.openwebui.com/openapi-servers/mcp) and
* [Agno](https://docs.agno.com/introduction/playground).
These applications allow you to combine Serena with almost any LLM (including locally running ones)
and offer various other integrations.
```
--------------------------------------------------------------------------------
/test/solidlsp/vue/test_vue_symbol_retrieval.py:
--------------------------------------------------------------------------------
```python
import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
from solidlsp.ls_types import SymbolKind
pytestmark = pytest.mark.vue
class TestVueSymbolRetrieval:
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_containing_symbol_script_setup_function(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
# First, get the document symbols to find the handleDigit function
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
handle_digit_symbol = next((s for s in symbols[0] if s.get("name") == "handleDigit"), None)
if not handle_digit_symbol or "range" not in handle_digit_symbol:
pytest.skip("handleDigit symbol not found - test fixture may need updating")
# Get a position inside the handleDigit function body
# We'll use a line a few lines after the function start
func_start_line = handle_digit_symbol["range"]["start"]["line"]
position_inside_func = func_start_line + 1
position_character = 4
# Request the containing symbol for this position
containing_symbol = language_server.request_containing_symbol(
file_path, position_inside_func, position_character, include_body=True
)
# Verify we found the correct containing symbol
assert containing_symbol is not None, "Should find containing symbol inside handleDigit function"
assert containing_symbol["name"] == "handleDigit", f"Expected handleDigit, got {containing_symbol.get('name')}"
assert containing_symbol["kind"] in [
SymbolKind.Function,
SymbolKind.Method,
SymbolKind.Variable,
], f"Expected function-like kind, got {containing_symbol.get('kind')}"
# Verify the body is included if available
if "body" in containing_symbol:
assert "handleDigit" in containing_symbol["body"], "Function body should contain function name"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_containing_symbol_computed_property(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
# Find the formattedDisplay computed property
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
formatted_display_symbol = next((s for s in symbols[0] if s.get("name") == "formattedDisplay"), None)
if not formatted_display_symbol or "range" not in formatted_display_symbol:
pytest.skip("formattedDisplay computed property not found - test fixture may need updating")
# Get a position inside the computed property body
computed_start_line = formatted_display_symbol["range"]["start"]["line"]
position_inside_computed = computed_start_line + 1
position_character = 4
# Request the containing symbol for this position
containing_symbol = language_server.request_containing_symbol(
file_path, position_inside_computed, position_character, include_body=True
)
# Verify we found the correct containing symbol
# The language server returns the arrow function inside computed() rather than
# the variable name. This is technically correct from LSP's perspective.
assert containing_symbol is not None, "Should find containing symbol inside computed property"
assert containing_symbol["name"] in [
"formattedDisplay",
"computed() callback",
], f"Expected formattedDisplay or computed() callback, got {containing_symbol.get('name')}"
assert containing_symbol["kind"] in [
SymbolKind.Property,
SymbolKind.Variable,
SymbolKind.Function,
], f"Expected property/variable/function kind for computed, got {containing_symbol.get('kind')}"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_containing_symbol_no_containing_symbol(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
# Position in the import statements at the top of the script setup
# Line 1-6 contain imports in CalculatorInput.vue
import_line = 2
import_character = 10
# Request containing symbol for a position in the imports
containing_symbol = language_server.request_containing_symbol(file_path, import_line, import_character)
# Should return None or empty dictionary for positions without containing symbol
assert (
containing_symbol is None or containing_symbol == {}
), f"Expected None or empty dict for import position, got {containing_symbol}"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_referencing_symbols_store_function(self, language_server: SolidLanguageServer) -> None:
store_file = os.path.join("src", "stores", "calculator.ts")
# Find the 'add' action in the calculator store
symbols = language_server.request_document_symbols(store_file).get_all_symbols_and_roots()
add_symbol = next((s for s in symbols[0] if s.get("name") == "add"), None)
if not add_symbol or "selectionRange" not in add_symbol:
pytest.skip("add action not found in calculator store - test fixture may need updating")
# Request referencing symbols for the add action (include_self=True to get at least the definition)
sel_start = add_symbol["selectionRange"]["start"]
ref_symbols = [
ref.symbol
for ref in language_server.request_referencing_symbols(store_file, sel_start["line"], sel_start["character"], include_self=True)
]
assert isinstance(ref_symbols, list), f"request_referencing_symbols should return a list, got {type(ref_symbols)}"
for symbol in ref_symbols:
assert "name" in symbol, "Referencing symbol should have a name"
assert "kind" in symbol, "Referencing symbol should have a kind"
vue_refs = [
symbol for symbol in ref_symbols if "location" in symbol and "uri" in symbol["location"] and ".vue" in symbol["location"]["uri"]
]
if len(vue_refs) > 0:
calculator_input_refs = [
ref
for ref in vue_refs
if "location" in ref and "uri" in ref["location"] and "CalculatorInput.vue" in ref["location"]["uri"]
]
for ref in calculator_input_refs:
assert "name" in ref, "Reference should have name"
assert "location" in ref, "Reference should have location"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_referencing_symbols_composable(self, language_server: SolidLanguageServer) -> None:
composable_file = os.path.join("src", "composables", "useFormatter.ts")
# Find the useFormatter composable function
symbols = language_server.request_document_symbols(composable_file).get_all_symbols_and_roots()
use_formatter_symbol = next((s for s in symbols[0] if s.get("name") == "useFormatter"), None)
if not use_formatter_symbol or "selectionRange" not in use_formatter_symbol:
pytest.skip("useFormatter composable not found - test fixture may need updating")
# Request referencing symbols for the composable
sel_start = use_formatter_symbol["selectionRange"]["start"]
ref_symbols = [
ref.symbol for ref in language_server.request_referencing_symbols(composable_file, sel_start["line"], sel_start["character"])
]
# Verify we found references - useFormatter is imported and used in CalculatorInput.vue
assert (
len(ref_symbols) >= 1
), f"useFormatter should have at least 1 reference (used in CalculatorInput.vue), found {len(ref_symbols)} references"
# Check for references in Vue components
vue_refs = [
symbol for symbol in ref_symbols if "location" in symbol and "uri" in symbol["location"] and ".vue" in symbol["location"]["uri"]
]
# CalculatorInput.vue imports and uses useFormatter
assert len(vue_refs) >= 1, f"Should find at least 1 Vue component reference to useFormatter, found {len(vue_refs)}"
# Verify we found reference in CalculatorInput.vue specifically
has_calculator_input_ref = any(
"CalculatorInput.vue" in ref["location"]["uri"] for ref in vue_refs if "location" in ref and "uri" in ref["location"]
)
assert has_calculator_input_ref, (
f"Should find reference to useFormatter in CalculatorInput.vue. "
f"Found references in: {[ref['location']['uri'] for ref in vue_refs if 'location' in ref and 'uri' in ref['location']]}"
)
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_vue_component_cross_references(self, language_server: SolidLanguageServer) -> None:
input_file = os.path.join("src", "components", "CalculatorInput.vue")
button_file = os.path.join("src", "components", "CalculatorButton.vue")
definitions = language_server.request_definition(input_file, 4, 10)
assert len(definitions) == 1, f"Should find exactly 1 definition for CalculatorButton import, got {len(definitions)}"
assert (
"CalculatorButton.vue" in definitions[0]["relativePath"]
), f"Definition should point to CalculatorButton.vue, got {definitions[0]['relativePath']}"
refs = language_server.request_references(input_file, 4, 10)
assert len(refs) >= 2, (
f"Should find at least 2 references to CalculatorButton (import + template usages). "
f"In CalculatorInput.vue, CalculatorButton is imported and used ~7 times in template. Found {len(refs)} references"
)
button_symbols = language_server.request_document_symbols(button_file).get_all_symbols_and_roots()
symbol_names = [s.get("name") for s in button_symbols[0]]
assert "Props" in symbol_names, "CalculatorButton.vue should have Props interface"
assert "handleClick" in symbol_names, "CalculatorButton.vue should have handleClick function"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_defining_symbol_import_resolution(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
# Find the import position for useCalculatorStore
# In CalculatorInput.vue (0-indexed lines):
# Line 2: import { useCalculatorStore } from '@/stores/calculator'
# Line 8: const store = useCalculatorStore()
# We'll request definition at the position of "useCalculatorStore" in the usage line
defining_symbol = language_server.request_defining_symbol(file_path, 8, 18)
if defining_symbol is None:
# Some language servers may not support go-to-definition at usage sites
# Try at line 2 (import statement) instead
defining_symbol = language_server.request_defining_symbol(file_path, 2, 18)
# Verify we found a defining symbol
assert defining_symbol is not None, "Should find defining symbol for useCalculatorStore"
assert "name" in defining_symbol, "Defining symbol should have a name"
assert defining_symbol.get("name") in [
"useCalculatorStore",
"calculator",
], f"Expected useCalculatorStore or calculator, got {defining_symbol.get('name')}"
# Verify it points to the store file
if "location" in defining_symbol and "uri" in defining_symbol["location"]:
assert (
"calculator.ts" in defining_symbol["location"]["uri"]
), f"Should point to calculator.ts, got {defining_symbol['location']['uri']}"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_request_defining_symbol_component_import(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
definitions = language_server.request_definition(file_path, 4, 10)
assert len(definitions) > 0, "Should find definition for CalculatorButton import"
definition = definitions[0]
assert definition["relativePath"] is not None, "Definition should have a relative path"
assert (
"CalculatorButton.vue" in definition["relativePath"]
), f"Should point to CalculatorButton.vue, got {definition['relativePath']}"
assert definition["range"]["start"]["line"] == 0, "Definition should point to start of .vue file"
defining_symbol = language_server.request_defining_symbol(file_path, 4, 10)
assert defining_symbol is None or "name" in defining_symbol, "If defining_symbol is found, it should have a name"
```
--------------------------------------------------------------------------------
/src/solidlsp/language_servers/powershell_language_server.py:
--------------------------------------------------------------------------------
```python
"""
Provides PowerShell specific instantiation of the LanguageServer class using PowerShell Editor Services.
Contains various configurations and settings specific to PowerShell scripting.
"""
import logging
import os
import pathlib
import platform
import shutil
import tempfile
import threading
import zipfile
from pathlib import Path
import requests
from overrides import override
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
# PowerShell Editor Services version to download
PSES_VERSION = "4.4.0"
class PowerShellLanguageServer(SolidLanguageServer):
"""
Provides PowerShell specific instantiation of the LanguageServer class using PowerShell Editor Services.
Contains various configurations and settings specific to PowerShell scripting.
"""
@override
def is_ignored_dirname(self, dirname: str) -> bool:
# For PowerShell projects, ignore common build/output directories
return super().is_ignored_dirname(dirname) or dirname in [
"bin",
"obj",
".vscode",
"TestResults",
"Output",
]
@staticmethod
def _get_pwsh_path() -> str | None:
"""Get the path to PowerShell Core (pwsh) executable."""
# Check if pwsh is in PATH
pwsh = shutil.which("pwsh")
if pwsh:
return pwsh
# Check common installation locations
home = Path.home()
system = platform.system()
possible_paths: list[Path] = []
if system == "Windows":
possible_paths = [
Path(os.environ.get("PROGRAMFILES", "C:\\Program Files")) / "PowerShell" / "7" / "pwsh.exe",
Path(os.environ.get("PROGRAMFILES", "C:\\Program Files")) / "PowerShell" / "7-preview" / "pwsh.exe",
home / "AppData" / "Local" / "Microsoft" / "PowerShell" / "pwsh.exe",
]
elif system == "Darwin":
possible_paths = [
Path("/usr/local/bin/pwsh"),
Path("/opt/homebrew/bin/pwsh"),
home / ".dotnet" / "tools" / "pwsh",
]
else: # Linux
possible_paths = [
Path("/usr/bin/pwsh"),
Path("/usr/local/bin/pwsh"),
Path("/opt/microsoft/powershell/7/pwsh"),
home / ".dotnet" / "tools" / "pwsh",
]
for path in possible_paths:
if path.exists():
return str(path)
return None
@classmethod
def _get_pses_path(cls, solidlsp_settings: SolidLSPSettings) -> str | None:
"""Get the path to PowerShell Editor Services installation."""
install_dir = Path(cls.ls_resources_dir(solidlsp_settings)) / "powershell"
start_script = install_dir / "PowerShellEditorServices" / "Start-EditorServices.ps1"
if start_script.exists():
return str(start_script)
return None
@classmethod
def _download_pses(cls, solidlsp_settings: SolidLSPSettings) -> str:
"""Download and install PowerShell Editor Services."""
download_url = (
f"https://github.com/PowerShell/PowerShellEditorServices/releases/download/v{PSES_VERSION}/PowerShellEditorServices.zip"
)
# Create installation directory
install_dir = Path(cls.ls_resources_dir(solidlsp_settings)) / "powershell"
install_dir.mkdir(parents=True, exist_ok=True)
# Download the file
log.info(f"Downloading PowerShell Editor Services from {download_url}...")
response = requests.get(download_url, stream=True, timeout=120)
response.raise_for_status()
# Save the zip file
zip_path = install_dir / "PowerShellEditorServices.zip"
with open(zip_path, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
log.info(f"Extracting PowerShell Editor Services to {install_dir}...")
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(install_dir)
# Clean up zip file
zip_path.unlink()
start_script = install_dir / "PowerShellEditorServices" / "Start-EditorServices.ps1"
if not start_script.exists():
raise RuntimeError(f"Failed to find Start-EditorServices.ps1 after extraction at {start_script}")
log.info(f"PowerShell Editor Services installed at: {install_dir}")
return str(start_script)
@classmethod
def _setup_runtime_dependency(cls, solidlsp_settings: SolidLSPSettings) -> tuple[str, str, str]:
"""
Check if required PowerShell runtime dependencies are available.
Downloads PowerShell Editor Services if not present.
Returns:
tuple: (pwsh_path, start_script_path, bundled_modules_path)
"""
# Check for PowerShell Core
pwsh_path = cls._get_pwsh_path()
if not pwsh_path:
raise RuntimeError(
"PowerShell Core (pwsh) is not installed or not in PATH. "
"Please install PowerShell 7+ from https://github.com/PowerShell/PowerShell"
)
# Check for PowerShell Editor Services
pses_path = cls._get_pses_path(solidlsp_settings)
if not pses_path:
log.info("PowerShell Editor Services not found. Downloading...")
pses_path = cls._download_pses(solidlsp_settings)
# The bundled modules path is the directory containing PowerShellEditorServices
bundled_modules_path = str(Path(pses_path).parent)
return pwsh_path, pses_path, bundled_modules_path
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
pwsh_path, pses_path, bundled_modules_path = self._setup_runtime_dependency(solidlsp_settings)
# Create a temp directory for PSES logs and session details
pses_temp_dir = Path(tempfile.gettempdir()) / "solidlsp_pses"
pses_temp_dir.mkdir(parents=True, exist_ok=True)
log_path = pses_temp_dir / "pses.log"
session_details_path = pses_temp_dir / "session.json"
# Build the command to start PowerShell Editor Services in stdio mode
# PSES requires several parameters beyond just -Stdio
# Using list format for robust argument handling - the PowerShell command
# after -Command must be a single string element
pses_command = (
f"& '{pses_path}' "
f"-HostName 'SolidLSP' "
f"-HostProfileId 'solidlsp' "
f"-HostVersion '1.0.0' "
f"-BundledModulesPath '{bundled_modules_path}' "
f"-LogPath '{log_path}' "
f"-LogLevel 'Information' "
f"-SessionDetailsPath '{session_details_path}' "
f"-Stdio"
)
cmd: list[str] = [
pwsh_path,
"-NoLogo",
"-NoProfile",
"-Command",
pses_command,
]
super().__init__(
config,
repository_root_path,
ProcessLaunchInfo(cmd=cmd, cwd=repository_root_path),
"powershell",
solidlsp_settings,
)
self.server_ready = threading.Event()
@staticmethod
def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
"""
Returns the initialize params for the PowerShell Editor Services.
"""
root_uri = pathlib.Path(repository_absolute_path).as_uri()
initialize_params = {
"locale": "en",
"capabilities": {
"textDocument": {
"synchronization": {"didSave": True, "dynamicRegistration": True},
"completion": {
"dynamicRegistration": True,
"completionItem": {
"snippetSupport": True,
"commitCharactersSupport": True,
"documentationFormat": ["markdown", "plaintext"],
"deprecatedSupport": True,
},
},
"definition": {"dynamicRegistration": True},
"references": {"dynamicRegistration": True},
"documentSymbol": {
"dynamicRegistration": True,
"hierarchicalDocumentSymbolSupport": True,
"symbolKind": {"valueSet": list(range(1, 27))},
},
"hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
"signatureHelp": {
"dynamicRegistration": True,
"signatureInformation": {
"documentationFormat": ["markdown", "plaintext"],
"parameterInformation": {"labelOffsetSupport": True},
},
},
"codeAction": {"dynamicRegistration": True},
"formatting": {"dynamicRegistration": True},
"rangeFormatting": {"dynamicRegistration": True},
},
"workspace": {
"workspaceFolders": True,
"didChangeConfiguration": {"dynamicRegistration": True},
"configuration": True,
"symbol": {
"dynamicRegistration": True,
"symbolKind": {"valueSet": list(range(1, 27))},
},
},
},
"processId": os.getpid(),
"rootPath": repository_absolute_path,
"rootUri": root_uri,
"workspaceFolders": [
{
"uri": root_uri,
"name": os.path.basename(repository_absolute_path),
}
],
}
return initialize_params # type: ignore[return-value]
def _start_server(self) -> None:
"""
Starts the PowerShell Editor Services, waits for the server to be ready.
"""
self._dynamic_capabilities: set[str] = set()
def register_capability_handler(params: dict) -> None:
"""Handle dynamic capability registration from PSES."""
registrations = params.get("registrations", [])
for reg in registrations:
method = reg.get("method", "")
log.info(f"PSES registered dynamic capability: {method}")
self._dynamic_capabilities.add(method)
# Mark server ready when we get document symbol registration
if method == "textDocument/documentSymbol":
self.server_ready.set()
return
def window_log_message(msg: dict) -> None:
log.info(f"LSP: window/logMessage: {msg}")
# Check for PSES ready signals
message_text = msg.get("message", "")
if "started" in message_text.lower() or "ready" in message_text.lower():
log.info("PowerShell Editor Services ready signal detected")
self.server_ready.set()
self.completions_available.set()
def do_nothing(params: dict) -> None:
return
self.server.on_request("client/registerCapability", register_capability_handler)
self.server.on_notification("window/logMessage", window_log_message)
self.server.on_notification("$/progress", do_nothing)
self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
self.server.on_notification("powerShell/executionStatusChanged", do_nothing)
log.info("Starting PowerShell Editor Services process")
self.server.start()
initialize_params = self._get_initialize_params(self.repository_root_path)
log.info("Sending initialize request from LSP client to LSP server and awaiting response")
init_response = self.server.send.initialize(initialize_params)
log.info(f"Received initialize response from PowerShell server: {init_response}")
# Verify server capabilities - PSES uses dynamic capability registration
# so we check for either static or dynamic capabilities
capabilities = init_response.get("capabilities", {})
log.info(f"Server capabilities: {capabilities}")
# Send initialized notification to trigger dynamic capability registration
self.server.notify.initialized({})
# Wait for server readiness with timeout
log.info("Waiting for PowerShell Editor Services to be ready...")
if not self.server_ready.wait(timeout=10.0):
# Fallback: assume server is ready after timeout
log.info("Timeout waiting for PSES ready signal, proceeding anyway")
self.server_ready.set()
self.completions_available.set()
else:
log.info("PowerShell Editor Services initialization complete")
```
--------------------------------------------------------------------------------
/src/solidlsp/language_servers/pascal_server.py:
--------------------------------------------------------------------------------
```python
"""
Provides Pascal/Free Pascal specific instantiation of the LanguageServer class using pasls.
Contains various configurations and settings specific to Pascal and Free Pascal.
pasls installation strategy:
1. Use existing pasls from PATH
2. Download prebuilt binary from GitHub releases
Supported platforms for binary download:
- linux-x64, linux-arm64
- osx-x64, osx-arm64
- win-x64
You can pass the following entries in ls_specific_settings["pascal"]:
- (reserved for future use)
"""
import logging
import os
import pathlib
import shutil
import threading
from solidlsp.language_servers.common import RuntimeDependency, RuntimeDependencyCollection, quote_windows_path
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
log = logging.getLogger(__name__)
class PascalLanguageServer(SolidLanguageServer):
"""
Provides Pascal specific instantiation of the LanguageServer class using pasls.
Contains various configurations and settings specific to Free Pascal and Lazarus.
"""
PASLS_VERSION = "0.1.0"
PASLS_RELEASES_URL = "https://github.com/zen010101/pascal-language-server/releases/download"
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
"""
Creates a PascalLanguageServer instance. This class is not meant to be instantiated directly.
Use LanguageServer.create() instead.
"""
pasls_executable_path = self._setup_runtime_dependencies(solidlsp_settings)
super().__init__(
config,
repository_root_path,
ProcessLaunchInfo(cmd=pasls_executable_path, cwd=repository_root_path),
"pascal",
solidlsp_settings,
)
self.server_ready = threading.Event()
self.completions_available_event = threading.Event()
@classmethod
def _setup_runtime_dependencies(cls, solidlsp_settings: SolidLSPSettings) -> str:
"""
Setup runtime dependencies for Pascal Language Server (pasls).
Returns:
str: The command to start the pasls server
"""
# Check if pasls is already in PATH
pasls_in_path = shutil.which("pasls")
if pasls_in_path:
log.info(f"Found pasls in PATH: {pasls_in_path}")
return quote_windows_path(pasls_in_path)
# Use RuntimeDependencyCollection for download
deps = RuntimeDependencyCollection(
[
RuntimeDependency(
id="PascalLanguageServer",
description="Pascal Language Server for Linux (x64)",
url=f"{cls.PASLS_RELEASES_URL}/v{cls.PASLS_VERSION}/pasls-linux-x64.tar.gz",
platform_id="linux-x64",
archive_type="gztar",
binary_name="pasls",
),
RuntimeDependency(
id="PascalLanguageServer",
description="Pascal Language Server for Linux (arm64)",
url=f"{cls.PASLS_RELEASES_URL}/v{cls.PASLS_VERSION}/pasls-linux-arm64.tar.gz",
platform_id="linux-arm64",
archive_type="gztar",
binary_name="pasls",
),
RuntimeDependency(
id="PascalLanguageServer",
description="Pascal Language Server for macOS (x64)",
url=f"{cls.PASLS_RELEASES_URL}/v{cls.PASLS_VERSION}/pasls-darwin-x64.tar.gz",
platform_id="osx-x64",
archive_type="gztar",
binary_name="pasls",
),
RuntimeDependency(
id="PascalLanguageServer",
description="Pascal Language Server for macOS (arm64)",
url=f"{cls.PASLS_RELEASES_URL}/v{cls.PASLS_VERSION}/pasls-darwin-arm64.tar.gz",
platform_id="osx-arm64",
archive_type="gztar",
binary_name="pasls",
),
RuntimeDependency(
id="PascalLanguageServer",
description="Pascal Language Server for Windows (x64)",
url=f"{cls.PASLS_RELEASES_URL}/v{cls.PASLS_VERSION}/pasls-win32-x64.zip",
platform_id="win-x64",
archive_type="zip",
binary_name="pasls.exe",
),
]
)
pasls_dir = cls.ls_resources_dir(solidlsp_settings)
pasls_executable_path = deps.binary_path(pasls_dir)
if not os.path.exists(pasls_executable_path):
log.info(f"Downloading pasls to {pasls_dir}...")
deps.install(pasls_dir)
assert os.path.exists(pasls_executable_path), f"pasls executable not found at {pasls_executable_path}"
os.chmod(pasls_executable_path, 0o755)
log.info(f"Using pasls at: {pasls_executable_path}")
return quote_windows_path(pasls_executable_path)
@staticmethod
def _get_initialize_params(repository_absolute_path: str) -> InitializeParams:
"""
Returns the initialize params for the Pascal Language Server.
pasls (genericptr/pascal-language-server) reads compiler paths from:
1. Environment variables (PP, FPCDIR, LAZARUSDIR) via TCodeToolsOptions.InitWithEnvironmentVariables
2. Lazarus config files via GuessCodeToolConfig
We only pass target OS/CPU in initializationOptions if explicitly set.
"""
root_uri = pathlib.Path(repository_absolute_path).as_uri()
# Build initializationOptions from environment variables
# pasls reads these to configure CodeTools:
# - PP: Path to FPC compiler executable
# - FPCDIR: Path to FPC source directory
# - LAZARUSDIR: Path to Lazarus directory (only needed for LCL projects)
# - FPCTARGET: Target OS
# - FPCTARGETCPU: Target CPU
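# Example with hypothetical paths (actual locations depend on your FPC/Lazarus installation):
#   PP=/usr/bin/fpc FPCDIR=/usr/share/fpcsrc LAZARUSDIR=/usr/lib/lazarus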
initialization_options: dict = {}
env_vars = ["PP", "FPCDIR", "LAZARUSDIR", "FPCTARGET", "FPCTARGETCPU"]
for var in env_vars:
value = os.environ.get(var, "")
if value:
initialization_options[var] = value
initialize_params = {
"locale": "en",
"capabilities": {
"textDocument": {
"synchronization": {
"didSave": True,
"dynamicRegistration": True,
"willSave": True,
"willSaveWaitUntil": True,
},
"completion": {
"dynamicRegistration": True,
"completionItem": {
"snippetSupport": True,
"commitCharactersSupport": True,
"documentationFormat": ["markdown", "plaintext"],
},
},
"hover": {
"dynamicRegistration": True,
"contentFormat": ["markdown", "plaintext"],
},
"signatureHelp": {
"dynamicRegistration": True,
"signatureInformation": {
"documentationFormat": ["markdown", "plaintext"],
},
},
"definition": {"dynamicRegistration": True, "linkSupport": True},
"references": {"dynamicRegistration": True},
"documentHighlight": {"dynamicRegistration": True},
"documentSymbol": {
"dynamicRegistration": True,
"hierarchicalDocumentSymbolSupport": True,
"symbolKind": {"valueSet": list(range(1, 27))},
},
"codeAction": {
"dynamicRegistration": True,
"codeActionLiteralSupport": {
"codeActionKind": {
"valueSet": [
"quickfix",
"refactor",
"refactor.extract",
"refactor.inline",
"refactor.rewrite",
"source",
"source.organizeImports",
]
}
},
},
"formatting": {"dynamicRegistration": True},
"rangeFormatting": {"dynamicRegistration": True},
},
"workspace": {
"workspaceFolders": True,
"didChangeConfiguration": {"dynamicRegistration": True},
"symbol": {"dynamicRegistration": True},
"executeCommand": {"dynamicRegistration": True},
"configuration": True,
"workspaceEdit": {
"documentChanges": True,
},
},
},
"initializationOptions": initialization_options,
"processId": os.getpid(),
"rootPath": repository_absolute_path,
"rootUri": root_uri,
"workspaceFolders": [
{
"uri": root_uri,
"name": os.path.basename(repository_absolute_path),
}
],
}
return initialize_params # type: ignore
def _start_server(self) -> None:
"""
Starts the Pascal Language Server, waits for the server to be ready and yields the LanguageServer instance.
"""
def register_capability_handler(params: dict) -> None:
log.debug(f"Capability registered: {params}")
return
def window_log_message(msg: dict) -> None:
log.info(f"LSP: window/logMessage: {msg}")
# Mark server as ready when we see initialization messages
message_text = msg.get("message", "")
if "initialized" in message_text.lower() or "ready" in message_text.lower():
log.info("Pascal language server ready signal detected")
self.server_ready.set()
self.completions_available.set()
def publish_diagnostics(params: dict) -> None:
log.debug(f"Diagnostics: {params}")
return
def do_nothing(params: dict) -> None:
return
self.server.on_request("client/registerCapability", register_capability_handler)
self.server.on_notification("window/logMessage", window_log_message)
self.server.on_notification("window/showMessage", window_log_message)
self.server.on_notification("textDocument/publishDiagnostics", publish_diagnostics)
self.server.on_notification("$/progress", do_nothing)
log.info("Starting Pascal server process")
self.server.start()
initialize_params = self._get_initialize_params(self.repository_root_path)
log.info("Sending initialize request from LSP client to LSP server and awaiting response")
init_response = self.server.send.initialize(initialize_params)
log.debug(f"Received initialize response from Pascal server: {init_response}")
# Verify capabilities
capabilities = init_response.get("capabilities", {})
assert "textDocumentSync" in capabilities
# Check for various capabilities
if "completionProvider" in capabilities:
log.info("Pascal server supports code completion")
if "definitionProvider" in capabilities:
log.info("Pascal server supports go to definition")
if "referencesProvider" in capabilities:
log.info("Pascal server supports find references")
if "documentSymbolProvider" in capabilities:
log.info("Pascal server supports document symbols")
self.server.notify.initialized({})
# Wait for server readiness with timeout
log.info("Waiting for Pascal language server to be ready...")
if not self.server_ready.wait(timeout=5.0):
# pasls may not send explicit ready signals, so we proceed after timeout
log.info("Timeout waiting for Pascal server ready signal, assuming server is ready")
self.server_ready.set()
self.completions_available.set()
else:
log.info("Pascal server initialization complete")
def is_ignored_dirname(self, dirname: str) -> bool:
"""
Check if a directory should be ignored for Pascal projects.
Common Pascal/Lazarus directories to ignore.
"""
ignored_dirs = {
"lib",
"backup",
"__history",
"__recovery",
"bin",
".git",
".svn",
".hg",
"node_modules",
}
return dirname.lower() in ignored_dirs
```
--------------------------------------------------------------------------------
/src/solidlsp/language_servers/typescript_language_server.py:
--------------------------------------------------------------------------------
```python
"""
Provides TypeScript specific instantiation of the LanguageServer class. Contains various configurations and settings specific to TypeScript.
"""
import logging
import os
import pathlib
import shutil
import threading
from typing import Any, cast
from overrides import override
from sensai.util.logging import LogTime
from solidlsp import ls_types
from solidlsp.ls import SolidLanguageServer
from solidlsp.ls_config import LanguageServerConfig
from solidlsp.ls_utils import PlatformId, PlatformUtils
from solidlsp.lsp_protocol_handler.lsp_types import InitializeParams
from solidlsp.lsp_protocol_handler.server import ProcessLaunchInfo
from solidlsp.settings import SolidLSPSettings
from .common import RuntimeDependency, RuntimeDependencyCollection
log = logging.getLogger(__name__)
# Platform-specific imports
if os.name != "nt": # Unix-like systems
import pwd
else:
# Dummy pwd module for Windows
class pwd: # type: ignore
@staticmethod
def getpwuid(uid: Any) -> Any:
return type("obj", (), {"pw_name": os.environ.get("USERNAME", "unknown")})()
# Conditionally import pwd module (Unix-only)
if not PlatformUtils.get_platform_id().value.startswith("win"):
pass
def prefer_non_node_modules_definition(definitions: list[ls_types.Location]) -> ls_types.Location:
"""
Select the preferred definition, preferring source files over type definitions.
TypeScript language servers often return both type definitions (.d.ts files
in node_modules) and source definitions. This function prefers:
1. Files not in node_modules
2. Falls back to first definition if all are in node_modules
:param definitions: A non-empty list of definition locations.
:return: The preferred definition location.
"""
for d in definitions:
rel_path = d.get("relativePath", "")
if rel_path and "node_modules" not in rel_path:
return d
return definitions[0]
class TypeScriptLanguageServer(SolidLanguageServer):
"""
Provides TypeScript specific instantiation of the LanguageServer class. Contains various configurations and settings specific to TypeScript.
You can pass the following entries in ls_specific_settings["typescript"]:
- typescript_version: Version of TypeScript to install (default: "5.9.3")
- typescript_language_server_version: Version of typescript-language-server to install (default: "5.1.3")
"""
def __init__(self, config: LanguageServerConfig, repository_root_path: str, solidlsp_settings: SolidLSPSettings):
"""
Creates a TypeScriptLanguageServer instance. This class is not meant to be instantiated directly. Use LanguageServer.create() instead.
"""
ts_lsp_executable_path = self._setup_runtime_dependencies(config, solidlsp_settings)
super().__init__(
config,
repository_root_path,
ProcessLaunchInfo(cmd=ts_lsp_executable_path, cwd=repository_root_path),
"typescript",
solidlsp_settings,
)
self.server_ready = threading.Event()
self.initialize_searcher_command_available = threading.Event()
@override
def is_ignored_dirname(self, dirname: str) -> bool:
return super().is_ignored_dirname(dirname) or dirname in [
"node_modules",
"dist",
"build",
"coverage",
]
@staticmethod
def _determine_log_level(line: str) -> int:
"""Classify typescript-language-server stderr output to avoid false-positive errors."""
return SolidLanguageServer._determine_log_level(line)
@classmethod
def _setup_runtime_dependencies(cls, config: LanguageServerConfig, solidlsp_settings: SolidLSPSettings) -> list[str]:
"""
Setup runtime dependencies for TypeScript Language Server and return the command to start the server.
"""
platform_id = PlatformUtils.get_platform_id()
valid_platforms = [
PlatformId.LINUX_x64,
PlatformId.LINUX_arm64,
PlatformId.OSX,
PlatformId.OSX_x64,
PlatformId.OSX_arm64,
PlatformId.WIN_x64,
PlatformId.WIN_arm64,
]
assert platform_id in valid_platforms, f"Platform {platform_id} is not supported for multilspy javascript/typescript at the moment"
# Get version settings from ls_specific_settings or use defaults
language_specific_config = solidlsp_settings.get_ls_specific_settings(cls.get_language_enum_instance())
typescript_version = language_specific_config.get("typescript_version", "5.9.3")
typescript_language_server_version = language_specific_config.get("typescript_language_server_version", "5.1.3")
deps = RuntimeDependencyCollection(
[
RuntimeDependency(
id="typescript",
description="typescript package",
command=["npm", "install", "--prefix", "./", f"typescript@{typescript_version}"],
platform_id="any",
),
RuntimeDependency(
id="typescript-language-server",
description="typescript-language-server package",
command=["npm", "install", "--prefix", "./", f"typescript-language-server@{typescript_language_server_version}"],
platform_id="any",
),
]
)
# Verify both node and npm are installed
is_node_installed = shutil.which("node") is not None
assert is_node_installed, "node is not installed or isn't in PATH. Please install NodeJS and try again."
is_npm_installed = shutil.which("npm") is not None
assert is_npm_installed, "npm is not installed or isn't in PATH. Please install npm and try again."
# Install typescript and typescript-language-server if not already installed or version mismatch
tsserver_ls_dir = os.path.join(cls.ls_resources_dir(solidlsp_settings), "ts-lsp")
tsserver_executable_path = os.path.join(tsserver_ls_dir, "node_modules", ".bin", "typescript-language-server")
# Check if installation is needed based on executable AND version
version_file = os.path.join(tsserver_ls_dir, ".installed_version")
expected_version = f"{typescript_version}_{typescript_language_server_version}"
needs_install = False
if not os.path.exists(tsserver_executable_path):
log.info(f"Typescript Language Server executable not found at {tsserver_executable_path}.")
needs_install = True
elif os.path.exists(version_file):
with open(version_file) as f:
installed_version = f.read().strip()
if installed_version != expected_version:
log.info(
f"TypeScript Language Server version mismatch: installed={installed_version}, expected={expected_version}. Reinstalling..."
)
needs_install = True
else:
# No version file exists, assume old installation needs refresh
log.info("TypeScript Language Server version file not found. Reinstalling to ensure correct version...")
needs_install = True
if needs_install:
log.info("Installing TypeScript Language Server dependencies...")
with LogTime("Installation of TypeScript language server dependencies", logger=log):
deps.install(tsserver_ls_dir)
# Write version marker file
with open(version_file, "w") as f:
f.write(expected_version)
log.info("TypeScript language server dependencies installed successfully")
if not os.path.exists(tsserver_executable_path):
raise FileNotFoundError(
f"typescript-language-server executable not found at {tsserver_executable_path}, something went wrong with the installation."
)
return [tsserver_executable_path, "--stdio"]
def _get_initialize_params(self, repository_absolute_path: str) -> InitializeParams:
"""
Returns the initialize params for the TypeScript Language Server.
"""
root_uri = pathlib.Path(repository_absolute_path).as_uri()
initialize_params = {
"locale": "en",
"capabilities": {
"textDocument": {
"synchronization": {"didSave": True, "dynamicRegistration": True},
"completion": {"dynamicRegistration": True, "completionItem": {"snippetSupport": True}},
"definition": {"dynamicRegistration": True},
"references": {"dynamicRegistration": True},
"documentSymbol": {
"dynamicRegistration": True,
"hierarchicalDocumentSymbolSupport": True,
"symbolKind": {"valueSet": list(range(1, 27))},
},
"hover": {"dynamicRegistration": True, "contentFormat": ["markdown", "plaintext"]},
"signatureHelp": {"dynamicRegistration": True},
"codeAction": {"dynamicRegistration": True},
"rename": {"dynamicRegistration": True, "prepareSupport": True},
},
"workspace": {
"workspaceFolders": True,
"didChangeConfiguration": {"dynamicRegistration": True},
"symbol": {"dynamicRegistration": True},
},
},
"processId": os.getpid(),
"rootPath": repository_absolute_path,
"rootUri": root_uri,
"workspaceFolders": [
{
"uri": root_uri,
"name": os.path.basename(repository_absolute_path),
}
],
}
return cast(InitializeParams, initialize_params)
def _start_server(self) -> None:
"""
Starts the TypeScript Language Server, waits for the server to be ready and yields the LanguageServer instance.
Usage:
```
async with lsp.start_server():
# LanguageServer has been initialized and ready to serve requests
await lsp.request_definition(...)
await lsp.request_references(...)
# Shutdown the LanguageServer on exit from scope
# LanguageServer has been shutdown
```
"""
def register_capability_handler(params: dict) -> None:
assert "registrations" in params
for registration in params["registrations"]:
if registration["method"] == "workspace/executeCommand":
self.initialize_searcher_command_available.set()
# TypeScript doesn't have a direct equivalent to resolve_main_method
# You might want to set a different flag or remove this line
# self.resolve_main_method_available.set()
return
def execute_client_command_handler(params: dict) -> list:
return []
def do_nothing(params: dict) -> None:
return
def window_log_message(msg: dict) -> None:
log.info(f"LSP: window/logMessage: {msg}")
def check_experimental_status(params: dict) -> None:
"""
Also listen for experimental/serverStatus as a backup signal
"""
if params.get("quiescent") == True:
self.server_ready.set()
self.completions_available.set()
self.server.on_request("client/registerCapability", register_capability_handler)
self.server.on_notification("window/logMessage", window_log_message)
self.server.on_request("workspace/executeClientCommand", execute_client_command_handler)
self.server.on_notification("$/progress", do_nothing)
self.server.on_notification("textDocument/publishDiagnostics", do_nothing)
self.server.on_notification("experimental/serverStatus", check_experimental_status)
log.info("Starting TypeScript server process")
self.server.start()
initialize_params = self._get_initialize_params(self.repository_root_path)
log.info(
"Sending initialize request from LSP client to LSP server and awaiting response",
)
init_response = self.server.send.initialize(initialize_params)
# TypeScript-specific capability checks
assert init_response["capabilities"]["textDocumentSync"] == 2
assert "completionProvider" in init_response["capabilities"]
assert init_response["capabilities"]["completionProvider"] == {
"triggerCharacters": [".", '"', "'", "/", "@", "<"],
"resolveProvider": True,
}
self.server.notify.initialized({})
if self.server_ready.wait(timeout=1.0):
log.info("TypeScript server is ready")
else:
log.info("Timeout waiting for TypeScript server to become ready, proceeding anyway")
# Fallback: assume server is ready after timeout
self.server_ready.set()
self.completions_available.set()
@override
def _get_wait_time_for_cross_file_referencing(self) -> float:
return 1
@override
def _get_preferred_definition(self, definitions: list[ls_types.Location]) -> ls_types.Location:
return prefer_non_node_modules_definition(definitions)
```
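The installer above caches the expected TypeScript and typescript-language-server versions in a `.installed_version` marker file and reinstalls whenever the executable or the marker is missing, or the recorded version is stale. A minimal standalone sketch of that marker-file pattern (the function names and parameters are illustrative, not part of solidlsp):
```python
import os


def needs_reinstall(install_dir: str, executable_path: str, expected_version: str) -> bool:
    """Return True if the executable is missing or the recorded version is stale (sketch)."""
    version_file = os.path.join(install_dir, ".installed_version")
    if not os.path.exists(executable_path):
        return True  # nothing installed yet
    if not os.path.exists(version_file):
        return True  # legacy installation without a marker: refresh to be safe
    with open(version_file) as f:
        return f.read().strip() != expected_version


def record_installed_version(install_dir: str, expected_version: str) -> None:
    """Write the marker file after a successful installation (sketch)."""
    with open(os.path.join(install_dir, ".installed_version"), "w") as f:
        f.write(expected_version)
```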
--------------------------------------------------------------------------------
/test/solidlsp/test_lsp_protocol_handler_server.py:
--------------------------------------------------------------------------------
```python
"""
Tests for JSON-RPC 2.0 params field handling in LSP protocol.
These tests verify the correct handling of the params field in LSP requests and notifications,
specifically ensuring:
- Void-type methods (shutdown, exit) omit params field entirely
- Methods with explicit params include them unchanged
- Methods with None params receive params: {} for Delphi/FPC compatibility
Reference: JSON-RPC 2.0 spec - params field is optional but must be object/array when present.
"""
from typing import Any
import pytest
from solidlsp.lsp_protocol_handler.server import make_notification, make_request
# =============================================================================
# Shared Assertion Helpers (DRY extraction per AI Panel recommendation)
# =============================================================================
def assert_jsonrpc_structure(
result: dict[str, Any],
expected_method: str,
expected_keys: set[str],
*,
expected_id: Any | None = None,
) -> None:
"""Verify JSON-RPC 2.0 structural requirements with 5-point error messages.
Args:
result: The dict returned by make_request/make_notification
expected_method: The method name that should be in the result
expected_keys: Exact set of keys that should be present
expected_id: If provided, verify the id field matches (for requests)
"""
# Verify jsonrpc field
assert "jsonrpc" in result, (
f"STRUCTURE ERROR: Missing required 'jsonrpc' field.\n"
f"Expected: jsonrpc='2.0'\n"
f"Actual keys: {list(result.keys())}\n"
f"GUIDANCE: All JSON-RPC 2.0 messages must include jsonrpc field."
)
assert result["jsonrpc"] == "2.0", (
f"STRUCTURE ERROR: Invalid jsonrpc version.\n"
f"Expected: '2.0'\n"
f"Actual: {result['jsonrpc']!r}\n"
f"GUIDANCE: JSON-RPC 2.0 requires jsonrpc='2.0' exactly."
)
# Verify method field
assert "method" in result, (
f"STRUCTURE ERROR: Missing required 'method' field.\n"
f"Expected: method='{expected_method}'\n"
f"Actual keys: {list(result.keys())}\n"
f"GUIDANCE: All requests/notifications must include method field."
)
assert result["method"] == expected_method, (
f"STRUCTURE ERROR: Method mismatch.\n"
f"Expected: '{expected_method}'\n"
f"Actual: {result['method']!r}\n"
f"GUIDANCE: Method field must match the requested method name."
)
# Verify id field if expected (requests only)
if expected_id is not None:
assert "id" in result, (
f"STRUCTURE ERROR: Missing required 'id' field for request.\n"
f"Expected: id={expected_id!r}\n"
f"Actual keys: {list(result.keys())}\n"
f"GUIDANCE: JSON-RPC 2.0 requests must include id field."
)
assert result["id"] == expected_id, (
f"STRUCTURE ERROR: Request ID mismatch.\n"
f"Expected: {expected_id!r}\n"
f"Actual: {result['id']!r}\n"
f"GUIDANCE: Request ID must be preserved exactly as provided."
)
# Verify exact key set
actual_keys = set(result.keys())
if actual_keys != expected_keys:
extra = sorted(actual_keys - expected_keys)
missing = sorted(expected_keys - actual_keys)
pytest.fail(
f"STRUCTURE ERROR: Key set mismatch for method '{expected_method}'.\n"
f"Expected keys: {sorted(expected_keys)}\n"
f"Actual keys: {sorted(actual_keys)}\n"
f"Extra keys: {extra}\n"
f"Missing keys: {missing}\n"
f"GUIDANCE: Verify key construction logic for Void-type vs normal methods."
)
def assert_params_omitted(result: dict[str, Any], method: str, req_id: str, input_params: Any = None) -> None:
"""Assert that params field is NOT present (for Void-type methods).
Args:
result: The dict returned by make_request/make_notification
method: Method name for error message context
req_id: Requirement ID (e.g., 'REQ-1', 'REQ-AI-PANEL-GAP')
input_params: If provided, shows what params were passed (for explicit params tests)
"""
if "params" in result:
input_note = f"\nInput params: {input_params}" if input_params is not None else ""
pytest.fail(
f"{req_id} VIOLATED: {method} method MUST omit params field entirely.{input_note}\n"
f"Expected: No 'params' key in result\n"
f"Actual: params={result.get('params')!r}\n"
f"Actual keys: {list(result.keys())}\n"
f"REASON: HLS/rust-analyzer Void types reject any params field (even empty object).\n"
f"GUIDANCE: Void-type constraint takes precedence - implementation must omit params entirely."
)
def assert_params_equal(result: dict[str, Any], expected_params: Any, req_id: str) -> None:
"""Assert that params field equals expected value.
Args:
result: The dict returned by make_request/make_notification
expected_params: The exact params value expected
req_id: Requirement ID for error message context
"""
if "params" not in result:
pytest.fail(
f"{req_id} VIOLATED: params field missing.\n"
f"Expected: params={expected_params!r}\n"
f"Actual keys: {list(result.keys())}\n"
f"GUIDANCE: Non-Void methods must include params field."
)
if result["params"] != expected_params:
pytest.fail(
f"{req_id} VIOLATED: params value mismatch.\n"
f"Expected: {expected_params!r}\n"
f"Actual: {result['params']!r}\n"
f"GUIDANCE: Params must be included exactly as provided (or {{}} for None)."
)
class TestMakeNotificationParamsHandling:
"""Test make_notification() params field handling per JSON-RPC 2.0 spec."""
def test_shutdown_method_omits_params_entirely(self) -> None:
"""REQ-1: Void-type method 'shutdown' MUST omit params field entirely."""
result = make_notification("shutdown", None)
assert_jsonrpc_structure(result, "shutdown", {"jsonrpc", "method"})
assert_params_omitted(result, "shutdown", "REQ-1")
def test_exit_method_omits_params_entirely(self) -> None:
"""REQ-1: Void-type method 'exit' MUST omit params field entirely."""
result = make_notification("exit", None)
assert_jsonrpc_structure(result, "exit", {"jsonrpc", "method"})
assert_params_omitted(result, "exit", "REQ-1")
def test_notification_with_explicit_params_dict(self) -> None:
"""REQ-2: Methods with explicit params MUST include them unchanged."""
test_params = {"uri": "file:///test.py", "languageId": "python"}
result = make_notification("textDocument/didOpen", test_params)
assert_jsonrpc_structure(result, "textDocument/didOpen", {"jsonrpc", "method", "params"})
assert_params_equal(result, test_params, "REQ-2")
def test_notification_with_explicit_params_list(self) -> None:
"""REQ-2: Methods with explicit params (list) MUST include them unchanged."""
test_params = ["arg1", "arg2", "arg3"]
result = make_notification("custom/method", test_params)
assert_jsonrpc_structure(result, "custom/method", {"jsonrpc", "method", "params"})
assert_params_equal(result, test_params, "REQ-2")
def test_notification_with_none_params_sends_empty_dict(self) -> None:
"""REQ-3: Methods with None params MUST send params: {} (Delphi/FPC compat)."""
result = make_notification("textDocument/didChange", None)
assert_jsonrpc_structure(result, "textDocument/didChange", {"jsonrpc", "method", "params"})
assert_params_equal(result, {}, "REQ-3")
def test_notification_with_empty_dict_params(self) -> None:
"""REQ-2: Explicit empty dict params MUST be included unchanged."""
result = make_notification("custom/notify", {})
assert_jsonrpc_structure(result, "custom/notify", {"jsonrpc", "method", "params"})
assert_params_equal(result, {}, "REQ-2")
class TestMakeRequestParamsHandling:
"""Test make_request() params field handling per JSON-RPC 2.0 spec."""
def test_shutdown_request_omits_params_entirely(self) -> None:
"""REQ-1: Void-type method 'shutdown' MUST omit params field entirely (requests)."""
result = make_request("shutdown", request_id=1, params=None)
assert_jsonrpc_structure(result, "shutdown", {"jsonrpc", "method", "id"}, expected_id=1)
assert_params_omitted(result, "shutdown", "REQ-1")
def test_request_with_explicit_params_dict(self) -> None:
"""REQ-2: Requests with explicit params MUST include them unchanged."""
test_params = {"textDocument": {"uri": "file:///test.py"}, "position": {"line": 10, "character": 5}}
result = make_request("textDocument/hover", request_id=42, params=test_params)
assert_jsonrpc_structure(result, "textDocument/hover", {"jsonrpc", "method", "id", "params"}, expected_id=42)
assert_params_equal(result, test_params, "REQ-2")
def test_request_with_none_params_sends_empty_dict(self) -> None:
"""REQ-3: Requests with None params MUST send params: {} (Delphi/FPC compat)."""
result = make_request("workspace/configuration", request_id=100, params=None)
assert_jsonrpc_structure(result, "workspace/configuration", {"jsonrpc", "method", "id", "params"}, expected_id=100)
assert_params_equal(result, {}, "REQ-3")
def test_request_id_preservation(self) -> None:
"""Verify request_id is correctly included in result (string ID)."""
test_id = "unique-request-123"
result = make_request("custom/request", request_id=test_id, params={"key": "value"})
assert_jsonrpc_structure(result, "custom/request", {"jsonrpc", "method", "id", "params"}, expected_id=test_id)
def test_request_with_explicit_params_list(self) -> None:
"""REQ-2: Requests with explicit params (list) MUST include them unchanged."""
test_params = [1, 2, 3]
result = make_request("custom/sum", request_id=99, params=test_params)
assert_jsonrpc_structure(result, "custom/sum", {"jsonrpc", "method", "id", "params"}, expected_id=99)
assert_params_equal(result, test_params, "REQ-2")
class TestVoidMethodsExhaustive:
"""Test all methods that should be treated as Void-type (no params)."""
def test_shutdown_request_ignores_explicit_params_dict(self) -> None:
"""REQ-AI-PANEL-GAP: shutdown MUST omit params even when caller explicitly provides params."""
explicit_params = {"key": "value", "another": "param"}
result = make_request("shutdown", request_id=1, params=explicit_params)
assert_jsonrpc_structure(result, "shutdown", {"jsonrpc", "method", "id"}, expected_id=1)
assert_params_omitted(result, "shutdown", "REQ-AI-PANEL-GAP", input_params=explicit_params)
def test_exit_notification_ignores_explicit_params(self) -> None:
"""REQ-AI-PANEL-GAP: exit MUST omit params even when caller explicitly provides params."""
explicit_params = {"unexpected": "params"}
result = make_notification("exit", explicit_params)
assert_jsonrpc_structure(result, "exit", {"jsonrpc", "method"})
assert_params_omitted(result, "exit", "REQ-AI-PANEL-GAP", input_params=explicit_params)
def test_only_shutdown_and_exit_are_void_methods(self) -> None:
"""REQ-BOUNDARY: Verify EXACTLY shutdown/exit are Void-type - no more, no less."""
# Positive verification: shutdown and exit MUST omit params
shutdown_notif = make_notification("shutdown", None)
exit_notif = make_notification("exit", None)
shutdown_req = make_request("shutdown", 1, None)
assert "params" not in shutdown_notif, "shutdown notification should omit params"
assert "params" not in exit_notif, "exit notification should omit params"
assert "params" not in shutdown_req, "shutdown request should omit params"
# Negative verification: other methods MUST include params (even when None -> {})
non_void_methods = [
"initialize",
"initialized",
"textDocument/didOpen",
"textDocument/didChange",
"textDocument/didClose",
"workspace/didChangeConfiguration",
"workspace/didChangeWatchedFiles",
]
for method in non_void_methods:
result_notif = make_notification(method, None)
result_req = make_request(method, 1, None)
if "params" not in result_notif:
pytest.fail(
f"BOUNDARY VIOLATION: '{method}' notification treated as Void-type.\n"
f"Expected: params field present (should be {{}})\n"
f"Actual keys: {list(result_notif.keys())}\n"
f"GUIDANCE: Only 'shutdown' and 'exit' should omit params field."
)
assert_params_equal(result_notif, {}, f"REQ-3 ({method} notification)")
if "params" not in result_req:
pytest.fail(
f"BOUNDARY VIOLATION: '{method}' request treated as Void-type.\n"
f"Expected: params field present (should be {{}})\n"
f"Actual keys: {list(result_req.keys())}\n"
f"GUIDANCE: Only 'shutdown' and 'exit' should omit params field."
)
assert_params_equal(result_req, {}, f"REQ-3 ({method} request)")
```
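The tests above pin down three rules: the Void-type methods `shutdown` and `exit` must omit the `params` field entirely (even if a caller passes one), explicit params are forwarded unchanged, and `None` becomes `params: {}` for all other methods. A minimal sketch of message builders that satisfy exactly these assertions (illustrative only, not the actual `make_request`/`make_notification` implementation in solidlsp):
```python
from typing import Any

# Methods whose LSP parameter type is Void; per the tests they must carry no params field.
_VOID_METHODS = {"shutdown", "exit"}


def sketch_make_notification(method: str, params: Any) -> dict[str, Any]:
    """Build a JSON-RPC 2.0 notification following the rules asserted above (sketch)."""
    message: dict[str, Any] = {"jsonrpc": "2.0", "method": method}
    if method in _VOID_METHODS:
        return message  # REQ-1: omit params entirely, even if explicitly provided
    message["params"] = {} if params is None else params  # REQ-3 / REQ-2
    return message


def sketch_make_request(method: str, request_id: Any, params: Any) -> dict[str, Any]:
    """Build a JSON-RPC 2.0 request following the same rules (sketch)."""
    message = sketch_make_notification(method, params)
    message["id"] = request_id
    return message


assert sketch_make_request("shutdown", 1, {"ignored": True}) == {"jsonrpc": "2.0", "method": "shutdown", "id": 1}
assert sketch_make_notification("textDocument/didChange", None)["params"] == {}
```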
--------------------------------------------------------------------------------
/src/serena/tools/jetbrains_tools.py:
--------------------------------------------------------------------------------
```python
import logging
from collections import defaultdict
from typing import Any, Literal
import serena.tools.jetbrains_types as jb
from serena.tools import Tool, ToolMarkerOptional, ToolMarkerSymbolicRead
from serena.tools.jetbrains_plugin_client import JetBrainsPluginClient
log = logging.getLogger(__name__)
class JetBrainsFindSymbolTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
"""
Performs a global (or local) search for symbols using the JetBrains backend
"""
def apply(
self,
name_path_pattern: str,
depth: int = 0,
relative_path: str | None = None,
include_body: bool = False,
include_info: bool = False,
search_deps: bool = False,
max_answer_chars: int = -1,
) -> str:
"""
Retrieves information on all symbols/code entities (classes, methods, etc.) based on the given name path pattern.
The returned symbol information can be used for edits or further queries.
Specify `depth > 0` to retrieve children (e.g., methods of a class).
A name path is a path in the symbol tree *within a source file*.
For example, the method `my_method` defined in class `MyClass` would have the name path `MyClass/my_method`.
If a symbol is overloaded (e.g., in Java), a 0-based index is appended (e.g. "MyClass/my_method[0]") to
uniquely identify it.
To search for a symbol, you provide a name path pattern that is used to match against name paths.
It can be
* a simple name (e.g. "method"), which will match any symbol with that name
* a relative path like "class/method", which will match any symbol with that name path suffix
* an absolute name path "/class/method" (absolute name path), which requires an exact match of the full name path within the source file.
Append an index `[i]` to match a specific overload only, e.g. "MyClass/my_method[1]".
:param name_path_pattern: the name path matching pattern (see above)
:param depth: depth up to which descendants shall be retrieved (e.g. use 1 to also retrieve immediate children;
for the case where the symbol is a class, this will return its methods).
Default 0.
:param relative_path: Optional. Restrict search to this file or directory. If None, searches entire codebase.
If a directory is passed, the search will be restricted to the files in that directory.
If a file is passed, the search will be restricted to that file.
If you have some knowledge about the codebase, you should use this parameter, as it will significantly
speed up the search as well as reduce the number of results.
:param include_body: If True, include the symbol's source code. Use judiciously.
        :param include_info: whether to include additional info (hover-like, typically including docstring and signature)
about the symbol (ignored if include_body is True).
Default False; info is never included for child symbols and is not included when body is requested.
:param search_deps: If True, also search in project dependencies (e.g., libraries).
:param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
-1 means the default value from the config will be used.
:return: JSON string: a list of symbols (with locations) matching the name.
"""
if relative_path == ".":
relative_path = None
with JetBrainsPluginClient.from_project(self.project) as client:
if include_body:
include_quick_info = False
include_documentation = False
else:
if include_info:
include_documentation = True
include_quick_info = False
else:
# If no additional information is requested, we still include the quick info (type signature)
include_documentation = False
include_quick_info = True
response_dict = client.find_symbol(
name_path=name_path_pattern,
relative_path=relative_path,
depth=depth,
include_body=include_body,
include_documentation=include_documentation,
include_quick_info=include_quick_info,
search_deps=search_deps,
)
result = self._to_json(response_dict)
return self._limit_length(result, max_answer_chars)
class JetBrainsFindReferencingSymbolsTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
"""
Finds symbols that reference the given symbol using the JetBrains backend
"""
# TODO: (maybe) - add content snippets showing the references like in LS based version?
def apply(
self,
name_path: str,
relative_path: str,
include_info: bool = False,
max_answer_chars: int = -1,
) -> str:
"""
Finds symbols that reference the symbol at the given `name_path`.
The result will contain metadata about the referencing symbols.
:param name_path: name path of the symbol for which to find references; matching logic as described in find symbol tool.
:param relative_path: the relative path to the file containing the symbol for which to find references.
Note that here you can't pass a directory but must pass a file.
:param include_info: whether to include info (hover-like, typically including docstring and signature)
about the referencing symbols. Default False.
:param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned. -1 means the
default value from the config will be used.
:return: a list of JSON objects with the symbols referencing the requested symbol
"""
with JetBrainsPluginClient.from_project(self.project) as client:
response_dict = client.find_references(
name_path=name_path,
relative_path=relative_path,
include_quick_info=include_info,
)
result = self._to_json(response_dict)
return self._limit_length(result, max_answer_chars)
class JetBrainsGetSymbolsOverviewTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
"""
Retrieves an overview of the top-level symbols within a specified file using the JetBrains backend
"""
USE_COMPACT_FORMAT = True
@staticmethod
def _transform_symbols_to_compact_format(symbols: list[jb.SymbolDTO]) -> dict[str, list]:
"""
Transform symbol overview from verbose format to compact grouped format.
Groups symbols by kind and uses names instead of full symbol objects.
For symbols with children, creates nested dictionaries.
The name_path can be inferred from the hierarchical structure:
- Top-level symbols: name_path = name
- Nested symbols: name_path = parent_name + "/" + name
For example, "convert" under class "ProjectType" has name_path "ProjectType/convert".
"""
result = defaultdict(list)
for symbol in symbols:
kind = symbol.get("type", "Unknown")
name_path = symbol["name_path"]
name = name_path.split("/")[-1]
children = symbol.get("children", [])
if children:
# Symbol has children: create nested dict {name: children_dict}
children_dict = JetBrainsGetSymbolsOverviewTool._transform_symbols_to_compact_format(children)
result[kind].append({name: children_dict})
else:
# Symbol has no children: just add the name
result[kind].append(name) # type: ignore
return result
def apply(
self,
relative_path: str,
depth: int = 0,
max_answer_chars: int = -1,
include_file_documentation: bool = False,
) -> str:
"""
Gets an overview of the top-level symbols in the given file.
Calling this is often a good idea before more targeted reading, searching or editing operations on the code symbols.
Before requesting a symbol overview, it is usually a good idea to narrow down the scope of the overview
by first understanding the basic directory structure of the repository that you can get from memories
or by using the `list_dir` and `find_file` tools (or similar).
:param relative_path: the relative path to the file to get the overview of
:param depth: depth up to which descendants shall be retrieved (e.g., use 1 to also retrieve immediate children).
:param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
-1 means the default value from the config will be used.
:param include_file_documentation: whether to include the file's docstring. Default False.
:return: a JSON object containing the symbols grouped by kind in a compact format.
"""
with JetBrainsPluginClient.from_project(self.project) as client:
symbol_overview = client.get_symbols_overview(
relative_path=relative_path, depth=depth, include_file_documentation=include_file_documentation
)
if self.USE_COMPACT_FORMAT:
symbols = symbol_overview["symbols"]
result: dict[str, Any] = {"symbols": self._transform_symbols_to_compact_format(symbols)}
documentation = symbol_overview.pop("documentation", None)
if documentation:
result["docstring"] = documentation
json_result = self._to_json(result)
else:
json_result = self._to_json(symbol_overview)
return self._limit_length(json_result, max_answer_chars)
class JetBrainsTypeHierarchyTool(Tool, ToolMarkerSymbolicRead, ToolMarkerOptional):
"""
Retrieves the type hierarchy (supertypes and/or subtypes) of a symbol using the JetBrains backend
"""
@staticmethod
def _transform_hierarchy_nodes(nodes: list[jb.TypeHierarchyNodeDTO] | None) -> dict[str, list]:
"""
Transform a list of TypeHierarchyNode into a file-grouped compact format.
Returns a dict where keys are relative_paths and values are lists of either:
- "SymbolNamePath" (leaf node)
- {"SymbolNamePath": {nested_file_grouped_children}} (node with children)
"""
if not nodes:
return {}
result: dict[str, list] = {}
for node in nodes:
symbol = node["symbol"]
name_path = symbol["name_path"]
rel_path = symbol["relative_path"]
children = node.get("children", [])
if rel_path not in result:
result[rel_path] = []
if children:
# Node with children - recurse
nested = JetBrainsTypeHierarchyTool._transform_hierarchy_nodes(children)
result[rel_path].append({name_path: nested})
else:
# Leaf node
result[rel_path].append(name_path)
return result
def apply(
self,
name_path: str,
relative_path: str,
hierarchy_type: Literal["super", "sub", "both"] = "both",
depth: int | None = 1,
max_answer_chars: int = -1,
) -> str:
"""
Gets the type hierarchy of a symbol (supertypes, subtypes, or both).
:param name_path: name path of the symbol for which to get the type hierarchy.
:param relative_path: the relative path to the file containing the symbol.
:param hierarchy_type: which hierarchy to retrieve: "super" for parent classes/interfaces,
"sub" for subclasses/implementations, or "both" for both directions. Default is "sub".
:param depth: depth limit for hierarchy traversal (None or 0 for unlimited). Default is 1.
:param max_answer_chars: max characters for the JSON result. If exceeded, no content is returned.
-1 means the default value from the config will be used.
:return: Compact JSON with file-grouped hierarchy. Error string if not applicable.
"""
with JetBrainsPluginClient.from_project(self.project) as client:
subtypes = None
supertypes = None
levels_not_included = {}
if hierarchy_type in ("super", "both"):
supertypes_response = client.get_supertypes(
name_path=name_path,
relative_path=relative_path,
depth=depth,
)
if "num_levels_not_included" in supertypes_response:
levels_not_included["supertypes"] = supertypes_response["num_levels_not_included"]
supertypes = self._transform_hierarchy_nodes(supertypes_response.get("hierarchy"))
if hierarchy_type in ("sub", "both"):
subtypes_response = client.get_subtypes(
name_path=name_path,
relative_path=relative_path,
depth=depth,
)
if "num_levels_not_included" in subtypes_response:
levels_not_included["subtypes"] = subtypes_response["num_levels_not_included"]
subtypes = self._transform_hierarchy_nodes(subtypes_response.get("hierarchy"))
result_dict: dict[str, dict | list] = {}
if supertypes is not None:
result_dict["supertypes"] = supertypes
if subtypes is not None:
result_dict["subtypes"] = subtypes
if levels_not_included:
result_dict["levels_not_included"] = levels_not_included
result = self._to_json(result_dict)
return self._limit_length(result, max_answer_chars)
```
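`JetBrainsGetSymbolsOverviewTool` compresses the plugin's symbol overview by grouping symbols by kind and nesting children under their parent's name, so the name path of any entry can be reconstructed from the hierarchy. A small self-contained sketch of that grouping with illustrative input data (the sample dicts are not real plugin output):
```python
from collections import defaultdict


def to_compact(symbols: list[dict]) -> dict[str, list]:
    """Group symbols by kind; leaves become bare names, parents become {name: children} (sketch)."""
    result: dict[str, list] = defaultdict(list)
    for symbol in symbols:
        name = symbol["name_path"].split("/")[-1]
        children = symbol.get("children", [])
        if children:
            result[symbol.get("type", "Unknown")].append({name: to_compact(children)})
        else:
            result[symbol.get("type", "Unknown")].append(name)
    return dict(result)


sample = [
    {"name_path": "ProjectType", "type": "Class",
     "children": [{"name_path": "ProjectType/convert", "type": "Method", "children": []}]},
    {"name_path": "main", "type": "Function", "children": []},
]
print(to_compact(sample))
# -> {'Class': [{'ProjectType': {'Method': ['convert']}}], 'Function': ['main']}
```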
--------------------------------------------------------------------------------
/test/solidlsp/toml/test_toml_basic.py:
--------------------------------------------------------------------------------
```python
"""
Basic integration tests for the TOML language server functionality.
These tests validate the functionality of the Taplo language server APIs
like request_document_symbols using the TOML test repository.
"""
from pathlib import Path
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
@pytest.mark.toml
class TestTomlLanguageServerBasics:
"""Test basic functionality of the TOML language server (Taplo)."""
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_language_server_initialization(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test that TOML language server can be initialized successfully."""
assert language_server is not None
assert language_server.language == Language.TOML
assert language_server.is_running()
assert Path(language_server.language_server.repository_root_path).resolve() == repo_path.resolve()
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_cargo_file_symbols(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test document symbols detection in Cargo.toml with specific symbol verification."""
all_symbols, root_symbols = language_server.request_document_symbols("Cargo.toml").get_all_symbols_and_roots()
assert all_symbols is not None, "Should return symbols for Cargo.toml"
assert len(all_symbols) > 0, f"Should find symbols in Cargo.toml, found {len(all_symbols)}"
# Verify specific top-level table names are detected
symbol_names = [sym.get("name") for sym in all_symbols]
assert "package" in symbol_names, "Should detect 'package' table in Cargo.toml"
assert "dependencies" in symbol_names, "Should detect 'dependencies' table in Cargo.toml"
assert "dev-dependencies" in symbol_names, "Should detect 'dev-dependencies' table in Cargo.toml"
assert "features" in symbol_names, "Should detect 'features' table in Cargo.toml"
assert "workspace" in symbol_names, "Should detect 'workspace' table in Cargo.toml"
# Verify nested symbols exist (keys under 'package')
assert "name" in symbol_names, "Should detect nested 'name' key"
assert "version" in symbol_names, "Should detect nested 'version' key"
assert "edition" in symbol_names, "Should detect nested 'edition' key"
# Check symbol kind for tables - Taplo uses kind 19 (object) for TOML tables
package_symbol = next((s for s in all_symbols if s.get("name") == "package"), None)
assert package_symbol is not None, "Should find 'package' symbol"
assert package_symbol.get("kind") == 19, "Top-level table should have kind 19 (object)"
dependencies_symbol = next((s for s in all_symbols if s.get("name") == "dependencies"), None)
assert dependencies_symbol is not None, "Should find 'dependencies' symbol"
assert dependencies_symbol.get("kind") == 19, "'dependencies' table should have kind 19 (object)"
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_pyproject_file_symbols(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test document symbols detection in pyproject.toml."""
all_symbols, root_symbols = language_server.request_document_symbols("pyproject.toml").get_all_symbols_and_roots()
assert all_symbols is not None, "Should return symbols for pyproject.toml"
assert len(all_symbols) > 0, f"Should find symbols in pyproject.toml, found {len(all_symbols)}"
# Verify specific top-level table names
symbol_names = [sym.get("name") for sym in all_symbols]
assert "project" in symbol_names, "Should detect 'project' table"
assert "build-system" in symbol_names, "Should detect 'build-system' table"
# Verify tool sections (nested tables)
# These could appear as 'tool' or 'tool.ruff' depending on Taplo's parsing
has_tool_section = any("tool" in name for name in symbol_names if name)
assert has_tool_section, "Should detect tool sections"
# Verify nested keys under project
assert "name" in symbol_names, "Should detect 'name' under project"
assert "version" in symbol_names, "Should detect 'version' under project"
assert "requires-python" in symbol_names or "dependencies" in symbol_names, "Should detect project dependencies"
# Check symbol kind for tables - Taplo uses kind 19 (object) for TOML tables
project_symbol = next((s for s in all_symbols if s.get("name") == "project"), None)
assert project_symbol is not None, "Should find 'project' symbol"
assert project_symbol.get("kind") == 19, "'project' table should have kind 19 (object)"
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_symbol_kinds(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test that TOML symbols have appropriate LSP kinds for different value types."""
all_symbols, root_symbols = language_server.request_document_symbols("Cargo.toml").get_all_symbols_and_roots()
assert all_symbols is not None
assert len(all_symbols) > 0
# Check boolean symbol kind (lto = true at line 22)
# LSP kind 17 = boolean
lto_symbol = next((s for s in all_symbols if s.get("name") == "lto"), None)
assert lto_symbol is not None, "Should find 'lto' boolean symbol"
assert lto_symbol.get("kind") == 17, "'lto' should have kind 17 (boolean)"
# Check number symbol kind (opt-level = 3 at line 23)
# LSP kind 16 = number
opt_level_symbol = next((s for s in all_symbols if s.get("name") == "opt-level"), None)
assert opt_level_symbol is not None, "Should find 'opt-level' number symbol"
assert opt_level_symbol.get("kind") == 16, "'opt-level' should have kind 16 (number)"
# Check string symbol kind (name = "test_project" at line 2)
# LSP kind 15 = string
name_symbols = [s for s in all_symbols if s.get("name") == "name"]
assert len(name_symbols) > 0, "Should find 'name' symbols"
# At least one should be a string
string_name_symbol = next((s for s in name_symbols if s.get("kind") == 15), None)
assert string_name_symbol is not None, "Should find 'name' with kind 15 (string)"
# Check array symbol kind (default = ["feature1"] at line 17)
# LSP kind 18 = array
default_symbol = next((s for s in all_symbols if s.get("name") == "default"), None)
assert default_symbol is not None, "Should find 'default' array symbol"
assert default_symbol.get("kind") == 18, "'default' should have kind 18 (array)"
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_symbols_with_body(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test request_document_symbols with body extraction."""
all_symbols, root_symbols = language_server.request_document_symbols("Cargo.toml").get_all_symbols_and_roots()
assert all_symbols is not None, "Should return symbols for Cargo.toml"
assert len(all_symbols) > 0, "Should have symbols"
# Find the 'package' symbol and verify its body
package_symbol = next((s for s in all_symbols if s.get("name") == "package"), None)
assert package_symbol is not None, "Should find 'package' symbol"
# Check that body exists and contains expected content
# Note: Taplo includes the section header in the body
assert "body" in package_symbol, "'package' symbol should have body"
package_body = package_symbol["body"]
assert 'name = "test_project"' in package_body, "Body should contain 'name' field"
assert 'version = "0.1.0"' in package_body, "Body should contain 'version' field"
assert 'edition = "2021"' in package_body, "Body should contain 'edition' field"
# Find the dependencies symbol and check its body
deps_symbol = next((s for s in all_symbols if s.get("name") == "dependencies"), None)
assert deps_symbol is not None, "Should find 'dependencies' symbol"
assert "body" in deps_symbol, "'dependencies' symbol should have body"
deps_body = deps_symbol["body"]
assert "serde" in deps_body, "Body should contain serde dependency"
assert "tokio" in deps_body, "Body should contain tokio dependency"
# Find the top-level [features] section (not the nested 'features' in serde dependency)
# The [features] section should be kind 19 (object) and at line 15 (0-indexed)
features_symbols = [s for s in all_symbols if s.get("name") == "features"]
# Find the top-level one - should be kind 19 (object) with children
features_symbol = next(
(s for s in features_symbols if s.get("kind") == 19 and s.get("children")),
None,
)
assert features_symbol is not None, "Should find top-level 'features' table symbol"
assert "body" in features_symbol, "'features' symbol should have body"
features_body = features_symbol["body"]
assert "default" in features_body, "Body should contain 'default' feature"
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_symbol_ranges(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test that symbols have proper range information."""
all_symbols, root_symbols = language_server.request_document_symbols("Cargo.toml").get_all_symbols_and_roots()
assert all_symbols is not None
assert len(all_symbols) > 0
# Check the 'package' symbol range - should start at line 0 (0-indexed, actual line 1)
package_symbol = next((s for s in all_symbols if s.get("name") == "package"), None)
assert package_symbol is not None, "Should find 'package' symbol"
assert "range" in package_symbol, "'package' symbol should have range"
package_range = package_symbol["range"]
assert "start" in package_range, "Range should have start"
assert "end" in package_range, "Range should have end"
assert package_range["start"]["line"] == 0, "'package' should start at line 0 (0-indexed, actual line 1)"
# Package block spans from line 1 to line 7 in file (1-indexed)
# In 0-indexed LSP coordinates: line 0 (start) to line 6 or 7 (end)
assert package_range["end"]["line"] >= 6, "'package' should end at or after line 6 (0-indexed)"
# Check a nested symbol range - 'name' under package at line 2 (1-indexed), line 1 (0-indexed)
name_symbols = [s for s in all_symbols if s.get("name") == "name"]
assert len(name_symbols) > 0, "Should find 'name' symbols"
# Find the one under 'package' (should be at line 1 in 0-indexed)
package_name = next((s for s in name_symbols if s["range"]["start"]["line"] == 1), None)
assert package_name is not None, "Should find 'name' under 'package'"
# Check the dependencies range - starts at line 9 (1-indexed), line 8 (0-indexed)
deps_symbol = next((s for s in all_symbols if s.get("name") == "dependencies"), None)
assert deps_symbol is not None, "Should find 'dependencies' symbol"
deps_range = deps_symbol["range"]
assert deps_range["start"]["line"] == 8, "'dependencies' should start at line 8 (0-indexed, actual line 9)"
# Check that range includes line and character positions
assert "line" in package_range["start"], "Start should have line"
assert "character" in package_range["start"], "Start should have character"
assert "line" in package_range["end"], "End should have line"
assert "character" in package_range["end"], "End should have character"
@pytest.mark.parametrize("language_server", [Language.TOML], indirect=True)
@pytest.mark.parametrize("repo_path", [Language.TOML], indirect=True)
def test_toml_nested_table_symbols(self, language_server: SolidLanguageServer, repo_path: Path) -> None:
"""Test detection of nested table symbols like profile.release and tool.ruff."""
# Test Cargo.toml for profile.release
cargo_symbols, _ = language_server.request_document_symbols("Cargo.toml").get_all_symbols_and_roots()
assert cargo_symbols is not None
symbol_names = [sym.get("name") for sym in cargo_symbols]
# Should detect profile.release or profile section
has_profile = any("profile" in name for name in symbol_names if name)
assert has_profile, "Should detect profile section in Cargo.toml"
# Test pyproject.toml for tool sections
pyproject_symbols, _ = language_server.request_document_symbols("pyproject.toml").get_all_symbols_and_roots()
assert pyproject_symbols is not None
pyproject_names = [sym.get("name") for sym in pyproject_symbols]
# Should detect tool.ruff, tool.mypy sections
has_ruff = any("ruff" in name for name in pyproject_names if name)
has_mypy = any("mypy" in name for name in pyproject_names if name)
assert has_ruff or has_mypy, "Should detect tool sections in pyproject.toml"
# Verify pyproject has expected boolean: strict = true
strict_symbol = next((s for s in pyproject_symbols if s.get("name") == "strict"), None)
if strict_symbol:
assert strict_symbol.get("kind") == 17, "'strict' should have kind 17 (boolean)"
```
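The assertions above compare Taplo's document symbols against raw LSP `SymbolKind` numbers. For readability, the relevant subset of the enum (values per the LSP specification) could be captured in a small helper like this sketch:
```python
from enum import IntEnum


class SymbolKind(IntEnum):
    """Subset of the LSP SymbolKind enum referenced by the Taplo tests above."""

    STRING = 15   # string values, e.g. name = "test_project"
    NUMBER = 16   # numeric values, e.g. opt-level = 3
    BOOLEAN = 17  # booleans, e.g. lto = true
    ARRAY = 18    # arrays, e.g. default = ["feature1"]
    OBJECT = 19   # TOML tables, e.g. [package], [dependencies]


# e.g. the table assertions could then read:
# assert package_symbol.get("kind") == SymbolKind.OBJECT
```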
--------------------------------------------------------------------------------
/test/solidlsp/python/test_python_basic.py:
--------------------------------------------------------------------------------
```python
"""
Basic integration tests for the language server functionality.
These tests validate the functionality of the language server APIs
like request_references using the test repository.
"""
import os
import pytest
from serena.project import Project
from serena.text_utils import LineType
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
@pytest.mark.python
class TestLanguageServerBasics:
"""Test basic functionality of the language server."""
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_request_references_user_class(self, language_server: SolidLanguageServer) -> None:
"""Test request_references on the User class."""
# Get references to the User class in models.py
file_path = os.path.join("test_repo", "models.py")
# Line 31 contains the User class definition
# Use selectionRange only
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
user_symbol = next((s for s in symbols[0] if s.get("name") == "User"), None)
if not user_symbol or "selectionRange" not in user_symbol:
raise AssertionError("User symbol or its selectionRange not found")
sel_start = user_symbol["selectionRange"]["start"]
references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert len(references) > 1, "User class should be referenced in multiple files (using selectionRange if present)"
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_request_references_item_class(self, language_server: SolidLanguageServer) -> None:
"""Test request_references on the Item class."""
# Get references to the Item class in models.py
file_path = os.path.join("test_repo", "models.py")
# Line 56 contains the Item class definition
# Use selectionRange only
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
item_symbol = next((s for s in symbols[0] if s.get("name") == "Item"), None)
if not item_symbol or "selectionRange" not in item_symbol:
raise AssertionError("Item symbol or its selectionRange not found")
sel_start = item_symbol["selectionRange"]["start"]
references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
services_references = [ref for ref in references if "services.py" in ref["uri"]]
assert len(services_references) > 0, "At least one reference should be in services.py (using selectionRange if present)"
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_request_references_function_parameter(self, language_server: SolidLanguageServer) -> None:
"""Test request_references on a function parameter."""
# Get references to the id parameter in get_user method
file_path = os.path.join("test_repo", "services.py")
# Line 24 contains the get_user method with id parameter
# Use selectionRange only
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
get_user_symbol = next((s for s in symbols[0] if s.get("name") == "get_user"), None)
if not get_user_symbol or "selectionRange" not in get_user_symbol:
raise AssertionError("get_user symbol or its selectionRange not found")
sel_start = get_user_symbol["selectionRange"]["start"]
references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert len(references) > 0, "id parameter should be referenced within the method (using selectionRange if present)"
@pytest.mark.parametrize("language_server", [Language.PYTHON], indirect=True)
def test_request_references_create_user_method(self, language_server: SolidLanguageServer) -> None:
# Get references to the create_user method in UserService
file_path = os.path.join("test_repo", "services.py")
# Line 15 contains the create_user method definition
# Use selectionRange only
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
create_user_symbol = next((s for s in symbols[0] if s.get("name") == "create_user"), None)
if not create_user_symbol or "selectionRange" not in create_user_symbol:
raise AssertionError("create_user symbol or its selectionRange not found")
sel_start = create_user_symbol["selectionRange"]["start"]
references = language_server.request_references(file_path, sel_start["line"], sel_start["character"])
assert len(references) > 1, "Should get valid references for create_user (using selectionRange if present)"
class TestProjectBasics:
@pytest.mark.parametrize("project", [Language.PYTHON], indirect=True)
def test_retrieve_content_around_line(self, project: Project) -> None:
"""Test retrieve_content_around_line functionality with various scenarios."""
file_path = os.path.join("test_repo", "models.py")
# Scenario 1: Just a single line (User class definition)
line_31 = project.retrieve_content_around_line(file_path, 31)
assert len(line_31.lines) == 1
assert "class User(BaseModel):" in line_31.lines[0].line_content
assert line_31.lines[0].line_number == 31
assert line_31.lines[0].match_type == LineType.MATCH
# Scenario 2: Context above and below
with_context_around_user = project.retrieve_content_around_line(file_path, 31, 2, 2)
assert len(with_context_around_user.lines) == 5
# Check line content
assert "class User(BaseModel):" in with_context_around_user.matched_lines[0].line_content
assert with_context_around_user.num_matched_lines == 1
assert " User model representing a system user." in with_context_around_user.lines[4].line_content
# Check line numbers
assert with_context_around_user.lines[0].line_number == 29
assert with_context_around_user.lines[1].line_number == 30
assert with_context_around_user.lines[2].line_number == 31
assert with_context_around_user.lines[3].line_number == 32
assert with_context_around_user.lines[4].line_number == 33
# Check match types
assert with_context_around_user.lines[0].match_type == LineType.BEFORE_MATCH
assert with_context_around_user.lines[1].match_type == LineType.BEFORE_MATCH
assert with_context_around_user.lines[2].match_type == LineType.MATCH
assert with_context_around_user.lines[3].match_type == LineType.AFTER_MATCH
assert with_context_around_user.lines[4].match_type == LineType.AFTER_MATCH
# Scenario 3a: Only context above
with_context_above = project.retrieve_content_around_line(file_path, 31, 3, 0)
assert len(with_context_above.lines) == 4
assert "return cls(id=id, name=name)" in with_context_above.lines[0].line_content
assert "class User(BaseModel):" in with_context_above.matched_lines[0].line_content
assert with_context_above.num_matched_lines == 1
# Check line numbers
assert with_context_above.lines[0].line_number == 28
assert with_context_above.lines[1].line_number == 29
assert with_context_above.lines[2].line_number == 30
assert with_context_above.lines[3].line_number == 31
# Check match types
assert with_context_above.lines[0].match_type == LineType.BEFORE_MATCH
assert with_context_above.lines[1].match_type == LineType.BEFORE_MATCH
assert with_context_above.lines[2].match_type == LineType.BEFORE_MATCH
assert with_context_above.lines[3].match_type == LineType.MATCH
# Scenario 3b: Only context below
with_context_below = project.retrieve_content_around_line(file_path, 31, 0, 3)
assert len(with_context_below.lines) == 4
assert "class User(BaseModel):" in with_context_below.matched_lines[0].line_content
assert with_context_below.num_matched_lines == 1
assert with_context_below.lines[0].line_number == 31
assert with_context_below.lines[1].line_number == 32
assert with_context_below.lines[2].line_number == 33
assert with_context_below.lines[3].line_number == 34
# Check match types
assert with_context_below.lines[0].match_type == LineType.MATCH
assert with_context_below.lines[1].match_type == LineType.AFTER_MATCH
assert with_context_below.lines[2].match_type == LineType.AFTER_MATCH
assert with_context_below.lines[3].match_type == LineType.AFTER_MATCH
# Scenario 4a: Edge case - context above but line is at 0
first_line_with_context_around = project.retrieve_content_around_line(file_path, 0, 2, 1)
assert len(first_line_with_context_around.lines) <= 4 # Should have at most 4 lines (line 0 + 1 below + up to 2 above)
assert first_line_with_context_around.lines[0].line_number <= 2 # First line should be at most line 2
# Check match type for the target line
for line in first_line_with_context_around.lines:
if line.line_number == 0:
assert line.match_type == LineType.MATCH
elif line.line_number < 0:
assert line.match_type == LineType.BEFORE_MATCH
else:
assert line.match_type == LineType.AFTER_MATCH
# Scenario 4b: Edge case - context above but line is at 1
second_line_with_context_above = project.retrieve_content_around_line(file_path, 1, 3, 1)
assert len(second_line_with_context_above.lines) <= 5 # Should have at most 5 lines (line 1 + 1 below + up to 3 above)
assert second_line_with_context_above.lines[0].line_number <= 1 # First line should be at most line 1
# Check match type for the target line
for line in second_line_with_context_above.lines:
if line.line_number == 1:
assert line.match_type == LineType.MATCH
elif line.line_number < 1:
assert line.match_type == LineType.BEFORE_MATCH
else:
assert line.match_type == LineType.AFTER_MATCH
# Scenario 4c: Edge case - context below but line is at the end of file
# First get the total number of lines in the file
all_content = project.read_file(file_path)
total_lines = len(all_content.split("\n"))
last_line_with_context_around = project.retrieve_content_around_line(file_path, total_lines - 1, 1, 3)
assert len(last_line_with_context_around.lines) <= 5 # Should have at most 5 lines (last line + 1 above + up to 3 below)
assert last_line_with_context_around.lines[-1].line_number >= total_lines - 4 # Last line should be at least total_lines - 4
# Check match type for the target line
for line in last_line_with_context_around.lines:
if line.line_number == total_lines - 1:
assert line.match_type == LineType.MATCH
elif line.line_number < total_lines - 1:
assert line.match_type == LineType.BEFORE_MATCH
else:
assert line.match_type == LineType.AFTER_MATCH
@pytest.mark.parametrize("project", [Language.PYTHON], indirect=True)
def test_search_files_for_pattern(self, project: Project) -> None:
"""Test search_files_for_pattern with various patterns and glob filters."""
# Test 1: Search for class definitions across all files
class_pattern = r"class\s+\w+\s*(?:\([^{]*\)|:)"
matches = project.search_source_files_for_pattern(class_pattern)
assert len(matches) > 0
# Should find multiple classes like User, Item, BaseModel, etc.
assert len(matches) >= 5
# Test 2: Search for specific class with include glob
user_class_pattern = r"class\s+User\s*(?:\([^{]*\)|:)"
matches = project.search_source_files_for_pattern(user_class_pattern, paths_include_glob="**/models.py")
assert len(matches) == 1 # Should only find User class in models.py
assert matches[0].source_file_path is not None
assert "models.py" in matches[0].source_file_path
# Test 3: Search for method definitions with exclude glob
method_pattern = r"def\s+\w+\s*\([^)]*\):"
matches = project.search_source_files_for_pattern(method_pattern, paths_exclude_glob="**/models.py")
assert len(matches) > 0
# Should find methods in services.py but not in models.py
assert all(match.source_file_path is not None and "models.py" not in match.source_file_path for match in matches)
# Test 4: Search for specific method with both include and exclude globs
create_user_pattern = r"def\s+create_user\s*\([^)]*\)(?:\s*->[^:]+)?:"
matches = project.search_source_files_for_pattern(
create_user_pattern, paths_include_glob="**/*.py", paths_exclude_glob="**/models.py"
)
assert len(matches) == 1 # Should only find create_user in services.py
assert matches[0].source_file_path is not None
assert "services.py" in matches[0].source_file_path
# Test 5: Search for a pattern that should appear in multiple files
init_pattern = r"def\s+__init__\s*\([^)]*\):"
matches = project.search_source_files_for_pattern(init_pattern)
assert len(matches) > 1 # Should find __init__ in multiple classes
# Should find __init__ in both models.py and services.py
assert any(match.source_file_path is not None and "models.py" in match.source_file_path for match in matches)
assert any(match.source_file_path is not None and "services.py" in match.source_file_path for match in matches)
# Test 6: Search with a pattern that should have no matches
no_match_pattern = r"def\s+this_method_does_not_exist\s*\([^)]*\):"
matches = project.search_source_files_for_pattern(no_match_pattern)
assert len(matches) == 0
```
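Each reference test above follows the same pattern: fetch the document symbols, pick the target by name, take the start of its `selectionRange`, and request references at that position. A hedged sketch of that pattern as a reusable helper (not part of the test suite; it assumes the `request_document_symbols` and `request_references` APIs used above):
```python
from solidlsp import SolidLanguageServer


def references_for_symbol(language_server: SolidLanguageServer, relative_path: str, symbol_name: str) -> list:
    """Find references to a named symbol via the start of its selectionRange (sketch of the pattern above)."""
    all_symbols, _roots = language_server.request_document_symbols(relative_path).get_all_symbols_and_roots()
    symbol = next((s for s in all_symbols if s.get("name") == symbol_name), None)
    if symbol is None or "selectionRange" not in symbol:
        raise AssertionError(f"{symbol_name} symbol or its selectionRange not found")
    start = symbol["selectionRange"]["start"]
    return language_server.request_references(relative_path, start["line"], start["character"])


# Usage mirroring test_request_references_user_class:
# refs = references_for_symbol(language_server, "test_repo/models.py", "User")
# assert len(refs) > 1
```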
--------------------------------------------------------------------------------
/test/solidlsp/vue/test_vue_rename.py:
--------------------------------------------------------------------------------
```python
import os
import pytest
from solidlsp import SolidLanguageServer
from solidlsp.ls_config import Language
pytestmark = pytest.mark.vue
class TestVueRename:
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_rename_function_within_single_file(self, language_server: SolidLanguageServer) -> None:
file_path = os.path.join("src", "components", "CalculatorInput.vue")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
handle_digit_symbol = next((s for s in symbols[0] if s.get("name") == "handleDigit"), None)
if not handle_digit_symbol or "selectionRange" not in handle_digit_symbol:
pytest.skip("handleDigit symbol not found - test fixture may need updating")
sel_start = handle_digit_symbol["selectionRange"]["start"]
workspace_edit = language_server.request_rename_symbol_edit(file_path, sel_start["line"], sel_start["character"], "processDigit")
assert workspace_edit is not None, "Should return WorkspaceEdit for rename operation"
has_changes = "changes" in workspace_edit and workspace_edit["changes"]
has_document_changes = "documentChanges" in workspace_edit and workspace_edit["documentChanges"]
assert has_changes or has_document_changes, "WorkspaceEdit should contain either 'changes' or 'documentChanges'"
if has_changes:
changes = workspace_edit["changes"]
assert len(changes) > 0, "Should have at least one file with changes"
calculator_input_files = [uri for uri in changes.keys() if "CalculatorInput.vue" in uri]
assert len(calculator_input_files) > 0, f"Should have edits for CalculatorInput.vue. Found edits for: {list(changes.keys())}"
file_edits = changes[calculator_input_files[0]]
assert len(file_edits) > 0, "Should have at least one TextEdit for the renamed symbol"
for edit in file_edits:
assert "range" in edit, "TextEdit should have a range"
assert "newText" in edit, "TextEdit should have newText"
assert edit["newText"] == "processDigit", f"newText should be 'processDigit', got {edit['newText']}"
assert "start" in edit["range"], "Range should have start position"
assert "end" in edit["range"], "Range should have end position"
assert "line" in edit["range"]["start"], "Start position should have line number"
assert "character" in edit["range"]["start"], "Start position should have character offset"
elif has_document_changes:
document_changes = workspace_edit["documentChanges"]
assert isinstance(document_changes, list), "documentChanges should be a list"
assert len(document_changes) > 0, "Should have at least one document change"
calculator_input_changes = [dc for dc in document_changes if "CalculatorInput.vue" in dc.get("textDocument", {}).get("uri", "")]
assert len(calculator_input_changes) > 0, "Should have edits for CalculatorInput.vue"
for change in calculator_input_changes:
assert "textDocument" in change, "Document change should have textDocument"
assert "edits" in change, "Document change should have edits"
edits = change["edits"]
assert len(edits) > 0, "Should have at least one TextEdit for the renamed symbol"
for edit in edits:
assert "range" in edit, "TextEdit should have a range"
assert "newText" in edit, "TextEdit should have newText"
assert edit["newText"] == "processDigit", f"newText should be 'processDigit', got {edit['newText']}"
assert "start" in edit["range"], "Range should have start position"
assert "end" in edit["range"], "Range should have end position"
assert "line" in edit["range"]["start"], "Start position should have line number"
assert "character" in edit["range"]["start"], "Start position should have character offset"
@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_rename_composable_function_cross_file(self, language_server: SolidLanguageServer) -> None:
composable_file = os.path.join("src", "composables", "useFormatter.ts")
symbols = language_server.request_document_symbols(composable_file).get_all_symbols_and_roots()
use_formatter_symbol = next((s for s in symbols[0] if s.get("name") == "useFormatter"), None)
if not use_formatter_symbol or "selectionRange" not in use_formatter_symbol:
pytest.skip("useFormatter symbol not found - test fixture may need updating")
sel_start = use_formatter_symbol["selectionRange"]["start"]
workspace_edit = language_server.request_rename_symbol_edit(
composable_file, sel_start["line"], sel_start["character"], "useNumberFormatter"
)
assert workspace_edit is not None, "Should return WorkspaceEdit for cross-file rename"
has_changes = "changes" in workspace_edit and workspace_edit["changes"]
has_document_changes = "documentChanges" in workspace_edit and workspace_edit["documentChanges"]
assert has_changes or has_document_changes, "WorkspaceEdit should contain either 'changes' or 'documentChanges'"
if has_changes:
changes = workspace_edit["changes"]
assert len(changes) > 0, "Should have at least one file with changes"
composable_files = [uri for uri in changes.keys() if "useFormatter.ts" in uri]
assert len(composable_files) > 0, f"Should have edits for useFormatter.ts (definition). Found edits for: {list(changes.keys())}"
for uri, edits in changes.items():
assert len(edits) > 0, f"File {uri} should have at least one edit"
for edit in edits:
assert "range" in edit, f"TextEdit in {uri} should have a range"
assert "newText" in edit, f"TextEdit in {uri} should have newText"
assert edit["newText"] == "useNumberFormatter", f"newText should be 'useNumberFormatter', got {edit['newText']}"
assert "start" in edit["range"], f"Range in {uri} should have start position"
assert "end" in edit["range"], f"Range in {uri} should have end position"
elif has_document_changes:
document_changes = workspace_edit["documentChanges"]
assert isinstance(document_changes, list), "documentChanges should be a list"
assert len(document_changes) > 0, "Should have at least one document change"
composable_changes = [dc for dc in document_changes if "useFormatter.ts" in dc.get("textDocument", {}).get("uri", "")]
assert (
len(composable_changes) > 0
), f"Should have edits for useFormatter.ts (definition). Found changes for: {[dc.get('textDocument', {}).get('uri', '') for dc in document_changes]}"
for change in document_changes:
assert "textDocument" in change, "Document change should have textDocument"
assert "edits" in change, "Document change should have edits"
uri = change["textDocument"]["uri"]
edits = change["edits"]
assert len(edits) > 0, f"File {uri} should have at least one edit"
for edit in edits:
assert "range" in edit, f"TextEdit in {uri} should have a range"
assert "newText" in edit, f"TextEdit in {uri} should have newText"
assert edit["newText"] == "useNumberFormatter", f"newText should be 'useNumberFormatter', got {edit['newText']}"
assert "start" in edit["range"], f"Range in {uri} should have start position"
assert "end" in edit["range"], f"Range in {uri} should have end position"

@pytest.mark.parametrize("language_server", [Language.VUE], indirect=True)
def test_rename_verifies_correct_file_paths_and_ranges(self, language_server: SolidLanguageServer) -> None:
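# Structural validation of the WorkspaceEdit itself: every URI must be a file:// URI,
# every edit must carry a range with zero-based, non-negative line/character positions,
# and every newText must equal the requested replacement name.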
file_path = os.path.join("src", "App.vue")
symbols = language_server.request_document_symbols(file_path).get_all_symbols_and_roots()
app_title_symbol = next((s for s in symbols[0] if s.get("name") == "appTitle"), None)
if not app_title_symbol or "selectionRange" not in app_title_symbol:
pytest.skip("appTitle symbol not found - test fixture may need updating")
sel_start = app_title_symbol["selectionRange"]["start"]
workspace_edit = language_server.request_rename_symbol_edit(
file_path, sel_start["line"], sel_start["character"], "applicationTitle"
)
assert workspace_edit is not None, "Should return WorkspaceEdit for rename operation"
assert isinstance(workspace_edit, dict), "WorkspaceEdit should be a dictionary"
has_changes = "changes" in workspace_edit and workspace_edit["changes"]
has_document_changes = "documentChanges" in workspace_edit and workspace_edit["documentChanges"]
assert has_changes or has_document_changes, "WorkspaceEdit must have 'changes' or 'documentChanges'"
if has_changes:
changes = workspace_edit["changes"]
assert isinstance(changes, dict), "changes should be a dict mapping URIs to TextEdit lists"
assert len(changes) > 0, "Should have edits for at least one file"
for uri, edits in changes.items():
assert isinstance(uri, str), f"URI should be a string, got {type(uri)}"
assert uri.startswith("file://"), f"URI should start with 'file://', got {uri}"
assert isinstance(edits, list), f"Edits for {uri} should be a list, got {type(edits)}"
assert len(edits) > 0, f"Should have at least one edit for {uri}"
for idx, edit in enumerate(edits):
assert isinstance(edit, dict), f"Edit {idx} in {uri} should be a dict, got {type(edit)}"
assert "range" in edit, f"Edit {idx} in {uri} missing 'range'"
assert "newText" in edit, f"Edit {idx} in {uri} missing 'newText'"
range_obj = edit["range"]
assert "start" in range_obj, f"Edit {idx} range in {uri} missing 'start'"
assert "end" in range_obj, f"Edit {idx} range in {uri} missing 'end'"
for pos_name in ["start", "end"]:
pos = range_obj[pos_name]
assert "line" in pos, f"Edit {idx} range {pos_name} in {uri} missing 'line'"
assert "character" in pos, f"Edit {idx} range {pos_name} in {uri} missing 'character'"
assert isinstance(pos["line"], int), f"Line should be int, got {type(pos['line'])}"
assert isinstance(pos["character"], int), f"Character should be int, got {type(pos['character'])}"
assert pos["line"] >= 0, f"Line number should be >= 0, got {pos['line']}"
assert pos["character"] >= 0, f"Character offset should be >= 0, got {pos['character']}"
assert isinstance(edit["newText"], str), f"newText should be string, got {type(edit['newText'])}"
assert edit["newText"] == "applicationTitle", f"newText should be 'applicationTitle', got {edit['newText']}"
elif has_document_changes:
document_changes = workspace_edit["documentChanges"]
assert isinstance(document_changes, list), "documentChanges should be a list"
assert len(document_changes) > 0, "Should have at least one document change"
for change in document_changes:
assert isinstance(change, dict), "Each document change should be a dict"
assert "textDocument" in change, "Document change should have textDocument"
assert "edits" in change, "Document change should have edits"
text_doc = change["textDocument"]
assert "uri" in text_doc, "textDocument should have uri"
assert text_doc["uri"].startswith("file://"), f"URI should start with 'file://', got {text_doc['uri']}"
edits = change["edits"]
assert isinstance(edits, list), "edits should be a list"
assert len(edits) > 0, "Should have at least one edit"
for idx, edit in enumerate(edits):
assert isinstance(edit, dict), f"Edit {idx} in {text_doc['uri']} should be a dict, got {type(edit)}"
assert "range" in edit, f"Edit {idx} in {text_doc['uri']} missing 'range'"
assert "newText" in edit, f"Edit {idx} in {text_doc['uri']} missing 'newText'"
range_obj = edit["range"]
assert "start" in range_obj, f"Edit {idx} range in {text_doc['uri']} missing 'start'"
assert "end" in range_obj, f"Edit {idx} range in {text_doc['uri']} missing 'end'"
for pos_name in ["start", "end"]:
pos = range_obj[pos_name]
assert "line" in pos, f"Edit {idx} range {pos_name} in {text_doc['uri']} missing 'line'"
assert "character" in pos, f"Edit {idx} range {pos_name} in {text_doc['uri']} missing 'character'"
assert isinstance(pos["line"], int), f"Line should be int, got {type(pos['line'])}"
assert isinstance(pos["character"], int), f"Character should be int, got {type(pos['character'])}"
assert pos["line"] >= 0, f"Line number should be >= 0, got {pos['line']}"
assert pos["character"] >= 0, f"Character offset should be >= 0, got {pos['character']}"
assert isinstance(edit["newText"], str), f"newText should be string, got {type(edit['newText'])}"
assert edit["newText"] == "applicationTitle", f"newText should be 'applicationTitle', got {edit['newText']}"
```