This is page 23 of 35. Use http://codebase.md/googleapis/genai-toolbox?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .ci
│   ├── continuous.release.cloudbuild.yaml
│   ├── generate_release_table.sh
│   ├── integration.cloudbuild.yaml
│   ├── quickstart_test
│   │   ├── go.integration.cloudbuild.yaml
│   │   ├── js.integration.cloudbuild.yaml
│   │   ├── py.integration.cloudbuild.yaml
│   │   ├── run_go_tests.sh
│   │   ├── run_js_tests.sh
│   │   ├── run_py_tests.sh
│   │   └── setup_hotels_sample.sql
│   ├── test_with_coverage.sh
│   └── versioned.release.cloudbuild.yaml
├── .github
│   ├── auto-label.yaml
│   ├── blunderbuss.yml
│   ├── CODEOWNERS
│   ├── header-checker-lint.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── question.yml
│   ├── label-sync.yml
│   ├── labels.yaml
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-please.yml
│   ├── renovate.json5
│   ├── sync-repo-settings.yaml
│   └── workflows
│       ├── cloud_build_failure_reporter.yml
│       ├── deploy_dev_docs.yaml
│       ├── deploy_previous_version_docs.yaml
│       ├── deploy_versioned_docs.yaml
│       ├── docs_deploy.yaml
│       ├── docs_preview_clean.yaml
│       ├── docs_preview_deploy.yaml
│       ├── lint.yaml
│       ├── schedule_reporter.yml
│       ├── sync-labels.yaml
│       └── tests.yaml
├── .gitignore
├── .gitmodules
├── .golangci.yaml
├── .hugo
│   ├── archetypes
│   │   └── default.md
│   ├── assets
│   │   ├── icons
│   │   │   └── logo.svg
│   │   └── scss
│   │       ├── _styles_project.scss
│   │       └── _variables_project.scss
│   ├── go.mod
│   ├── go.sum
│   ├── hugo.toml
│   ├── layouts
│   │   ├── _default
│   │   │   └── home.releases.releases
│   │   ├── index.llms-full.txt
│   │   ├── index.llms.txt
│   │   ├── partials
│   │   │   ├── hooks
│   │   │   │   └── head-end.html
│   │   │   ├── navbar-version-selector.html
│   │   │   ├── page-meta-links.html
│   │   │   └── td
│   │   │       └── render-heading.html
│   │   ├── robot.txt
│   │   └── shortcodes
│   │       ├── include.html
│   │       ├── ipynb.html
│   │       └── regionInclude.html
│   ├── package-lock.json
│   ├── package.json
│   └── static
│       ├── favicons
│       │   ├── android-chrome-192x192.png
│       │   ├── android-chrome-512x512.png
│       │   ├── apple-touch-icon.png
│       │   ├── favicon-16x16.png
│       │   ├── favicon-32x32.png
│       │   └── favicon.ico
│       └── js
│           └── w3.js
├── CHANGELOG.md
├── cmd
│   ├── options_test.go
│   ├── options.go
│   ├── root_test.go
│   ├── root.go
│   └── version.txt
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── DEVELOPER.md
├── Dockerfile
├── docs
│   └── en
│       ├── _index.md
│       ├── about
│       │   ├── _index.md
│       │   └── faq.md
│       ├── concepts
│       │   ├── _index.md
│       │   └── telemetry
│       │       ├── index.md
│       │       ├── telemetry_flow.png
│       │       └── telemetry_traces.png
│       ├── getting-started
│       │   ├── _index.md
│       │   ├── colab_quickstart.ipynb
│       │   ├── configure.md
│       │   ├── introduction
│       │   │   ├── _index.md
│       │   │   └── architecture.png
│       │   ├── local_quickstart_go.md
│       │   ├── local_quickstart_js.md
│       │   ├── local_quickstart.md
│       │   ├── mcp_quickstart
│       │   │   ├── _index.md
│       │   │   ├── inspector_tools.png
│       │   │   └── inspector.png
│       │   └── quickstart
│       │       ├── go
│       │       │   ├── genAI
│       │       │   │   ├── go.mod
│       │       │   │   ├── go.sum
│       │       │   │   └── quickstart.go
│       │       │   ├── genkit
│       │       │   │   ├── go.mod
│       │       │   │   ├── go.sum
│       │       │   │   └── quickstart.go
│       │       │   ├── langchain
│       │       │   │   ├── go.mod
│       │       │   │   ├── go.sum
│       │       │   │   └── quickstart.go
│       │       │   ├── openAI
│       │       │   │   ├── go.mod
│       │       │   │   ├── go.sum
│       │       │   │   └── quickstart.go
│       │       │   └── quickstart_test.go
│       │       ├── golden.txt
│       │       ├── js
│       │       │   ├── genAI
│       │       │   │   ├── package-lock.json
│       │       │   │   ├── package.json
│       │       │   │   └── quickstart.js
│       │       │   ├── genkit
│       │       │   │   ├── package-lock.json
│       │       │   │   ├── package.json
│       │       │   │   └── quickstart.js
│       │       │   ├── langchain
│       │       │   │   ├── package-lock.json
│       │       │   │   ├── package.json
│       │       │   │   └── quickstart.js
│       │       │   ├── llamaindex
│       │       │   │   ├── package-lock.json
│       │       │   │   ├── package.json
│       │       │   │   └── quickstart.js
│       │       │   └── quickstart.test.js
│       │       ├── python
│       │       │   ├── __init__.py
│       │       │   ├── adk
│       │       │   │   ├── quickstart.py
│       │       │   │   └── requirements.txt
│       │       │   ├── core
│       │       │   │   ├── quickstart.py
│       │       │   │   └── requirements.txt
│       │       │   ├── langchain
│       │       │   │   ├── quickstart.py
│       │       │   │   └── requirements.txt
│       │       │   ├── llamaindex
│       │       │   │   ├── quickstart.py
│       │       │   │   └── requirements.txt
│       │       │   └── quickstart_test.py
│       │       └── shared
│       │           ├── cloud_setup.md
│       │           ├── configure_toolbox.md
│       │           └── database_setup.md
│       ├── how-to
│       │   ├── _index.md
│       │   ├── connect_via_geminicli.md
│       │   ├── connect_via_mcp.md
│       │   ├── connect-ide
│       │   │   ├── _index.md
│       │   │   ├── alloydb_pg_admin_mcp.md
│       │   │   ├── alloydb_pg_mcp.md
│       │   │   ├── bigquery_mcp.md
│       │   │   ├── cloud_sql_mssql_admin_mcp.md
│       │   │   ├── cloud_sql_mssql_mcp.md
│       │   │   ├── cloud_sql_mysql_admin_mcp.md
│       │   │   ├── cloud_sql_mysql_mcp.md
│       │   │   ├── cloud_sql_pg_admin_mcp.md
│       │   │   ├── cloud_sql_pg_mcp.md
│       │   │   ├── firestore_mcp.md
│       │   │   ├── looker_mcp.md
│       │   │   ├── mssql_mcp.md
│       │   │   ├── mysql_mcp.md
│       │   │   ├── neo4j_mcp.md
│       │   │   ├── postgres_mcp.md
│       │   │   ├── spanner_mcp.md
│       │   │   └── sqlite_mcp.md
│       │   ├── deploy_docker.md
│       │   ├── deploy_gke.md
│       │   ├── deploy_toolbox.md
│       │   ├── export_telemetry.md
│       │   └── toolbox-ui
│       │       ├── edit-headers.gif
│       │       ├── edit-headers.png
│       │       ├── index.md
│       │       ├── optional-param-checked.png
│       │       ├── optional-param-unchecked.png
│       │       ├── run-tool.gif
│       │       ├── tools.png
│       │       └── toolsets.png
│       ├── reference
│       │   ├── _index.md
│       │   ├── cli.md
│       │   └── prebuilt-tools.md
│       ├── resources
│       │   ├── _index.md
│       │   ├── authServices
│       │   │   ├── _index.md
│       │   │   └── google.md
│       │   ├── sources
│       │   │   ├── _index.md
│       │   │   ├── alloydb-admin.md
│       │   │   ├── alloydb-pg.md
│       │   │   ├── bigquery.md
│       │   │   ├── bigtable.md
│       │   │   ├── cassandra.md
│       │   │   ├── clickhouse.md
│       │   │   ├── cloud-monitoring.md
│       │   │   ├── cloud-sql-admin.md
│       │   │   ├── cloud-sql-mssql.md
│       │   │   ├── cloud-sql-mysql.md
│       │   │   ├── cloud-sql-pg.md
│       │   │   ├── couchbase.md
│       │   │   ├── dataplex.md
│       │   │   ├── dgraph.md
│       │   │   ├── firebird.md
│       │   │   ├── firestore.md
│       │   │   ├── http.md
│       │   │   ├── looker.md
│       │   │   ├── mongodb.md
│       │   │   ├── mssql.md
│       │   │   ├── mysql.md
│       │   │   ├── neo4j.md
│       │   │   ├── oceanbase.md
│       │   │   ├── oracle.md
│       │   │   ├── postgres.md
│       │   │   ├── redis.md
│       │   │   ├── spanner.md
│       │   │   ├── sqlite.md
│       │   │   ├── tidb.md
│       │   │   ├── trino.md
│       │   │   ├── valkey.md
│       │   │   └── yugabytedb.md
│       │   └── tools
│       │       ├── _index.md
│       │       ├── alloydb
│       │       │   ├── _index.md
│       │       │   ├── alloydb-create-cluster.md
│       │       │   ├── alloydb-create-instance.md
│       │       │   ├── alloydb-create-user.md
│       │       │   ├── alloydb-get-cluster.md
│       │       │   ├── alloydb-get-instance.md
│       │       │   ├── alloydb-get-user.md
│       │       │   ├── alloydb-list-clusters.md
│       │       │   ├── alloydb-list-instances.md
│       │       │   ├── alloydb-list-users.md
│       │       │   └── alloydb-wait-for-operation.md
│       │       ├── alloydbainl
│       │       │   ├── _index.md
│       │       │   └── alloydb-ai-nl.md
│       │       ├── bigquery
│       │       │   ├── _index.md
│       │       │   ├── bigquery-analyze-contribution.md
│       │       │   ├── bigquery-conversational-analytics.md
│       │       │   ├── bigquery-execute-sql.md
│       │       │   ├── bigquery-forecast.md
│       │       │   ├── bigquery-get-dataset-info.md
│       │       │   ├── bigquery-get-table-info.md
│       │       │   ├── bigquery-list-dataset-ids.md
│       │       │   ├── bigquery-list-table-ids.md
│       │       │   ├── bigquery-search-catalog.md
│       │       │   └── bigquery-sql.md
│       │       ├── bigtable
│       │       │   ├── _index.md
│       │       │   └── bigtable-sql.md
│       │       ├── cassandra
│       │       │   ├── _index.md
│       │       │   └── cassandra-cql.md
│       │       ├── clickhouse
│       │       │   ├── _index.md
│       │       │   ├── clickhouse-execute-sql.md
│       │       │   ├── clickhouse-list-databases.md
│       │       │   ├── clickhouse-list-tables.md
│       │       │   └── clickhouse-sql.md
│       │       ├── cloudmonitoring
│       │       │   ├── _index.md
│       │       │   └── cloud-monitoring-query-prometheus.md
│       │       ├── cloudsql
│       │       │   ├── _index.md
│       │       │   ├── cloudsqlcreatedatabase.md
│       │       │   ├── cloudsqlcreateusers.md
│       │       │   ├── cloudsqlgetinstances.md
│       │       │   ├── cloudsqllistdatabases.md
│       │       │   ├── cloudsqllistinstances.md
│       │       │   ├── cloudsqlmssqlcreateinstance.md
│       │       │   ├── cloudsqlmysqlcreateinstance.md
│       │       │   ├── cloudsqlpgcreateinstances.md
│       │       │   └── cloudsqlwaitforoperation.md
│       │       ├── couchbase
│       │       │   ├── _index.md
│       │       │   └── couchbase-sql.md
│       │       ├── dataform
│       │       │   ├── _index.md
│       │       │   └── dataform-compile-local.md
│       │       ├── dataplex
│       │       │   ├── _index.md
│       │       │   ├── dataplex-lookup-entry.md
│       │       │   ├── dataplex-search-aspect-types.md
│       │       │   └── dataplex-search-entries.md
│       │       ├── dgraph
│       │       │   ├── _index.md
│       │       │   └── dgraph-dql.md
│       │       ├── firebird
│       │       │   ├── _index.md
│       │       │   ├── firebird-execute-sql.md
│       │       │   └── firebird-sql.md
│       │       ├── firestore
│       │       │   ├── _index.md
│       │       │   ├── firestore-add-documents.md
│       │       │   ├── firestore-delete-documents.md
│       │       │   ├── firestore-get-documents.md
│       │       │   ├── firestore-get-rules.md
│       │       │   ├── firestore-list-collections.md
│       │       │   ├── firestore-query-collection.md
│       │       │   ├── firestore-query.md
│       │       │   ├── firestore-update-document.md
│       │       │   └── firestore-validate-rules.md
│       │       ├── http
│       │       │   ├── _index.md
│       │       │   └── http.md
│       │       ├── looker
│       │       │   ├── _index.md
│       │       │   ├── looker-add-dashboard-element.md
│       │       │   ├── looker-conversational-analytics.md
│       │       │   ├── looker-create-project-file.md
│       │       │   ├── looker-delete-project-file.md
│       │       │   ├── looker-dev-mode.md
│       │       │   ├── looker-get-dashboards.md
│       │       │   ├── looker-get-dimensions.md
│       │       │   ├── looker-get-explores.md
│       │       │   ├── looker-get-filters.md
│       │       │   ├── looker-get-looks.md
│       │       │   ├── looker-get-measures.md
│       │       │   ├── looker-get-models.md
│       │       │   ├── looker-get-parameters.md
│       │       │   ├── looker-get-project-file.md
│       │       │   ├── looker-get-project-files.md
│       │       │   ├── looker-get-projects.md
│       │       │   ├── looker-health-analyze.md
│       │       │   ├── looker-health-pulse.md
│       │       │   ├── looker-health-vacuum.md
│       │       │   ├── looker-make-dashboard.md
│       │       │   ├── looker-make-look.md
│       │       │   ├── looker-query-sql.md
│       │       │   ├── looker-query-url.md
│       │       │   ├── looker-query.md
│       │       │   ├── looker-run-look.md
│       │       │   └── looker-update-project-file.md
│       │       ├── mongodb
│       │       │   ├── _index.md
│       │       │   ├── mongodb-aggregate.md
│       │       │   ├── mongodb-delete-many.md
│       │       │   ├── mongodb-delete-one.md
│       │       │   ├── mongodb-find-one.md
│       │       │   ├── mongodb-find.md
│       │       │   ├── mongodb-insert-many.md
│       │       │   ├── mongodb-insert-one.md
│       │       │   ├── mongodb-update-many.md
│       │       │   └── mongodb-update-one.md
│       │       ├── mssql
│       │       │   ├── _index.md
│       │       │   ├── mssql-execute-sql.md
│       │       │   ├── mssql-list-tables.md
│       │       │   └── mssql-sql.md
│       │       ├── mysql
│       │       │   ├── _index.md
│       │       │   ├── mysql-execute-sql.md
│       │       │   ├── mysql-list-active-queries.md
│       │       │   ├── mysql-list-table-fragmentation.md
│       │       │   ├── mysql-list-tables-missing-unique-indexes.md
│       │       │   ├── mysql-list-tables.md
│       │       │   └── mysql-sql.md
│       │       ├── neo4j
│       │       │   ├── _index.md
│       │       │   ├── neo4j-cypher.md
│       │       │   ├── neo4j-execute-cypher.md
│       │       │   └── neo4j-schema.md
│       │       ├── oceanbase
│       │       │   ├── _index.md
│       │       │   ├── oceanbase-execute-sql.md
│       │       │   └── oceanbase-sql.md
│       │       ├── oracle
│       │       │   ├── _index.md
│       │       │   ├── oracle-execute-sql.md
│       │       │   └── oracle-sql.md
│       │       ├── postgres
│       │       │   ├── _index.md
│       │       │   ├── postgres-execute-sql.md
│       │       │   ├── postgres-list-active-queries.md
│       │       │   ├── postgres-list-available-extensions.md
│       │       │   ├── postgres-list-installed-extensions.md
│       │       │   ├── postgres-list-tables.md
│       │       │   └── postgres-sql.md
│       │       ├── redis
│       │       │   ├── _index.md
│       │       │   └── redis.md
│       │       ├── spanner
│       │       │   ├── _index.md
│       │       │   ├── spanner-execute-sql.md
│       │       │   ├── spanner-list-tables.md
│       │       │   └── spanner-sql.md
│       │       ├── sqlite
│       │       │   ├── _index.md
│       │       │   ├── sqlite-execute-sql.md
│       │       │   └── sqlite-sql.md
│       │       ├── tidb
│       │       │   ├── _index.md
│       │       │   ├── tidb-execute-sql.md
│       │       │   └── tidb-sql.md
│       │       ├── trino
│       │       │   ├── _index.md
│       │       │   ├── trino-execute-sql.md
│       │       │   └── trino-sql.md
│       │       ├── utility
│       │       │   ├── _index.md
│       │       │   └── wait.md
│       │       ├── valkey
│       │       │   ├── _index.md
│       │       │   └── valkey.md
│       │       └── yuagbytedb
│       │           ├── _index.md
│       │           └── yugabytedb-sql.md
│       ├── samples
│       │   ├── _index.md
│       │   ├── alloydb
│       │   │   ├── _index.md
│       │   │   ├── ai-nl
│       │   │   │   ├── alloydb_ai_nl.ipynb
│       │   │   │   └── index.md
│       │   │   └── mcp_quickstart.md
│       │   ├── bigquery
│       │   │   ├── _index.md
│       │   │   ├── colab_quickstart_bigquery.ipynb
│       │   │   ├── local_quickstart.md
│       │   │   └── mcp_quickstart
│       │   │       ├── _index.md
│       │   │       ├── inspector_tools.png
│       │   │       └── inspector.png
│       │   └── looker
│       │       ├── _index.md
│       │       ├── looker_gemini_oauth
│       │       │   ├── _index.md
│       │       │   ├── authenticated.png
│       │       │   ├── authorize.png
│       │       │   └── registration.png
│       │       ├── looker_gemini.md
│       │       └── looker_mcp_inspector
│       │           ├── _index.md
│       │           ├── inspector_tools.png
│       │           └── inspector.png
│       └── sdks
│           ├── _index.md
│           ├── go-sdk.md
│           ├── js-sdk.md
│           └── python-sdk.md
├── gemini-extension.json
├── go.mod
├── go.sum
├── internal
│   ├── auth
│   │   ├── auth.go
│   │   └── google
│   │       └── google.go
│   ├── log
│   │   ├── handler.go
│   │   ├── log_test.go
│   │   ├── log.go
│   │   └── logger.go
│   ├── prebuiltconfigs
│   │   ├── prebuiltconfigs_test.go
│   │   ├── prebuiltconfigs.go
│   │   └── tools
│   │       ├── alloydb-postgres-admin.yaml
│   │       ├── alloydb-postgres-observability.yaml
│   │       ├── alloydb-postgres.yaml
│   │       ├── bigquery.yaml
│   │       ├── clickhouse.yaml
│   │       ├── cloud-sql-mssql-admin.yaml
│   │       ├── cloud-sql-mssql-observability.yaml
│   │       ├── cloud-sql-mssql.yaml
│   │       ├── cloud-sql-mysql-admin.yaml
│   │       ├── cloud-sql-mysql-observability.yaml
│   │       ├── cloud-sql-mysql.yaml
│   │       ├── cloud-sql-postgres-admin.yaml
│   │       ├── cloud-sql-postgres-observability.yaml
│   │       ├── cloud-sql-postgres.yaml
│   │       ├── dataplex.yaml
│   │       ├── firestore.yaml
│   │       ├── looker-conversational-analytics.yaml
│   │       ├── looker.yaml
│   │       ├── mssql.yaml
│   │       ├── mysql.yaml
│   │       ├── neo4j.yaml
│   │       ├── oceanbase.yaml
│   │       ├── postgres.yaml
│   │       ├── spanner-postgres.yaml
│   │       ├── spanner.yaml
│   │       └── sqlite.yaml
│   ├── server
│   │   ├── api_test.go
│   │   ├── api.go
│   │   ├── common_test.go
│   │   ├── config.go
│   │   ├── mcp
│   │   │   ├── jsonrpc
│   │   │   │   ├── jsonrpc_test.go
│   │   │   │   └── jsonrpc.go
│   │   │   ├── mcp.go
│   │   │   ├── util
│   │   │   │   └── lifecycle.go
│   │   │   ├── v20241105
│   │   │   │   ├── method.go
│   │   │   │   └── types.go
│   │   │   ├── v20250326
│   │   │   │   ├── method.go
│   │   │   │   └── types.go
│   │   │   └── v20250618
│   │   │       ├── method.go
│   │   │       └── types.go
│   │   ├── mcp_test.go
│   │   ├── mcp.go
│   │   ├── server_test.go
│   │   ├── server.go
│   │   ├── static
│   │   │   ├── assets
│   │   │   │   └── mcptoolboxlogo.png
│   │   │   ├── css
│   │   │   │   └── style.css
│   │   │   ├── index.html
│   │   │   ├── js
│   │   │   │   ├── auth.js
│   │   │   │   ├── loadTools.js
│   │   │   │   ├── mainContent.js
│   │   │   │   ├── navbar.js
│   │   │   │   ├── runTool.js
│   │   │   │   ├── toolDisplay.js
│   │   │   │   ├── tools.js
│   │   │   │   └── toolsets.js
│   │   │   ├── tools.html
│   │   │   └── toolsets.html
│   │   ├── web_test.go
│   │   └── web.go
│   ├── sources
│   │   ├── alloydbadmin
│   │   │   ├── alloydbadmin_test.go
│   │   │   └── alloydbadmin.go
│   │   ├── alloydbpg
│   │   │   ├── alloydb_pg_test.go
│   │   │   └── alloydb_pg.go
│   │   ├── bigquery
│   │   │   ├── bigquery_test.go
│   │   │   └── bigquery.go
│   │   ├── bigtable
│   │   │   ├── bigtable_test.go
│   │   │   └── bigtable.go
│   │   ├── cassandra
│   │   │   ├── cassandra_test.go
│   │   │   └── cassandra.go
│   │   ├── clickhouse
│   │   │   ├── clickhouse_test.go
│   │   │   └── clickhouse.go
│   │   ├── cloudmonitoring
│   │   │   ├── cloud_monitoring_test.go
│   │   │   └── cloud_monitoring.go
│   │   ├── cloudsqladmin
│   │   │   ├── cloud_sql_admin_test.go
│   │   │   └── cloud_sql_admin.go
│   │   ├── cloudsqlmssql
│   │   │   ├── cloud_sql_mssql_test.go
│   │   │   └── cloud_sql_mssql.go
│   │   ├── cloudsqlmysql
│   │   │   ├── cloud_sql_mysql_test.go
│   │   │   └── cloud_sql_mysql.go
│   │   ├── cloudsqlpg
│   │   │   ├── cloud_sql_pg_test.go
│   │   │   └── cloud_sql_pg.go
│   │   ├── couchbase
│   │   │   ├── couchbase_test.go
│   │   │   └── couchbase.go
│   │   ├── dataplex
│   │   │   ├── dataplex_test.go
│   │   │   └── dataplex.go
│   │   ├── dgraph
│   │   │   ├── dgraph_test.go
│   │   │   └── dgraph.go
│   │   ├── dialect.go
│   │   ├── firebird
│   │   │   ├── firebird_test.go
│   │   │   └── firebird.go
│   │   ├── firestore
│   │   │   ├── firestore_test.go
│   │   │   └── firestore.go
│   │   ├── http
│   │   │   ├── http_test.go
│   │   │   └── http.go
│   │   ├── ip_type.go
│   │   ├── looker
│   │   │   ├── looker_test.go
│   │   │   └── looker.go
│   │   ├── mongodb
│   │   │   ├── mongodb_test.go
│   │   │   └── mongodb.go
│   │   ├── mssql
│   │   │   ├── mssql_test.go
│   │   │   └── mssql.go
│   │   ├── mysql
│   │   │   ├── mysql_test.go
│   │   │   └── mysql.go
│   │   ├── neo4j
│   │   │   ├── neo4j_test.go
│   │   │   └── neo4j.go
│   │   ├── oceanbase
│   │   │   ├── oceanbase_test.go
│   │   │   └── oceanbase.go
│   │   ├── oracle
│   │   │   └── oracle.go
│   │   ├── postgres
│   │   │   ├── postgres_test.go
│   │   │   └── postgres.go
│   │   ├── redis
│   │   │   ├── redis_test.go
│   │   │   └── redis.go
│   │   ├── sources.go
│   │   ├── spanner
│   │   │   ├── spanner_test.go
│   │   │   └── spanner.go
│   │   ├── sqlite
│   │   │   ├── sqlite_test.go
│   │   │   └── sqlite.go
│   │   ├── tidb
│   │   │   ├── tidb_test.go
│   │   │   └── tidb.go
│   │   ├── trino
│   │   │   ├── trino_test.go
│   │   │   └── trino.go
│   │   ├── util.go
│   │   ├── valkey
│   │   │   ├── valkey_test.go
│   │   │   └── valkey.go
│   │   └── yugabytedb
│   │       ├── yugabytedb_test.go
│   │       └── yugabytedb.go
│   ├── telemetry
│   │   ├── instrumentation.go
│   │   └── telemetry.go
│   ├── testutils
│   │   └── testutils.go
│   ├── tools
│   │   ├── alloydb
│   │   │   ├── alloydbcreatecluster
│   │   │   │   ├── alloydbcreatecluster_test.go
│   │   │   │   └── alloydbcreatecluster.go
│   │   │   ├── alloydbcreateinstance
│   │   │   │   ├── alloydbcreateinstance_test.go
│   │   │   │   └── alloydbcreateinstance.go
│   │   │   ├── alloydbcreateuser
│   │   │   │   ├── alloydbcreateuser_test.go
│   │   │   │   └── alloydbcreateuser.go
│   │   │   ├── alloydbgetcluster
│   │   │   │   ├── alloydbgetcluster_test.go
│   │   │   │   └── alloydbgetcluster.go
│   │   │   ├── alloydbgetinstance
│   │   │   │   ├── alloydbgetinstance_test.go
│   │   │   │   └── alloydbgetinstance.go
│   │   │   ├── alloydbgetuser
│   │   │   │   ├── alloydbgetuser_test.go
│   │   │   │   └── alloydbgetuser.go
│   │   │   ├── alloydblistclusters
│   │   │   │   ├── alloydblistclusters_test.go
│   │   │   │   └── alloydblistclusters.go
│   │   │   ├── alloydblistinstances
│   │   │   │   ├── alloydblistinstances_test.go
│   │   │   │   └── alloydblistinstances.go
│   │   │   ├── alloydblistusers
│   │   │   │   ├── alloydblistusers_test.go
│   │   │   │   └── alloydblistusers.go
│   │   │   └── alloydbwaitforoperation
│   │   │       ├── alloydbwaitforoperation_test.go
│   │   │       └── alloydbwaitforoperation.go
│   │   ├── alloydbainl
│   │   │   ├── alloydbainl_test.go
│   │   │   └── alloydbainl.go
│   │   ├── bigquery
│   │   │   ├── bigqueryanalyzecontribution
│   │   │   │   ├── bigqueryanalyzecontribution_test.go
│   │   │   │   └── bigqueryanalyzecontribution.go
│   │   │   ├── bigquerycommon
│   │   │   │   ├── table_name_parser_test.go
│   │   │   │   ├── table_name_parser.go
│   │   │   │   └── util.go
│   │   │   ├── bigqueryconversationalanalytics
│   │   │   │   ├── bigqueryconversationalanalytics_test.go
│   │   │   │   └── bigqueryconversationalanalytics.go
│   │   │   ├── bigqueryexecutesql
│   │   │   │   ├── bigqueryexecutesql_test.go
│   │   │   │   └── bigqueryexecutesql.go
│   │   │   ├── bigqueryforecast
│   │   │   │   ├── bigqueryforecast_test.go
│   │   │   │   └── bigqueryforecast.go
│   │   │   ├── bigquerygetdatasetinfo
│   │   │   │   ├── bigquerygetdatasetinfo_test.go
│   │   │   │   └── bigquerygetdatasetinfo.go
│   │   │   ├── bigquerygettableinfo
│   │   │   │   ├── bigquerygettableinfo_test.go
│   │   │   │   └── bigquerygettableinfo.go
│   │   │   ├── bigquerylistdatasetids
│   │   │   │   ├── bigquerylistdatasetids_test.go
│   │   │   │   └── bigquerylistdatasetids.go
│   │   │   ├── bigquerylisttableids
│   │   │   │   ├── bigquerylisttableids_test.go
│   │   │   │   └── bigquerylisttableids.go
│   │   │   ├── bigquerysearchcatalog
│   │   │   │   ├── bigquerysearchcatalog_test.go
│   │   │   │   └── bigquerysearchcatalog.go
│   │   │   └── bigquerysql
│   │   │       ├── bigquerysql_test.go
│   │   │       └── bigquerysql.go
│   │   ├── bigtable
│   │   │   ├── bigtable_test.go
│   │   │   └── bigtable.go
│   │   ├── cassandra
│   │   │   └── cassandracql
│   │   │       ├── cassandracql_test.go
│   │   │       └── cassandracql.go
│   │   ├── clickhouse
│   │   │   ├── clickhouseexecutesql
│   │   │   │   ├── clickhouseexecutesql_test.go
│   │   │   │   └── clickhouseexecutesql.go
│   │   │   ├── clickhouselistdatabases
│   │   │   │   ├── clickhouselistdatabases_test.go
│   │   │   │   └── clickhouselistdatabases.go
│   │   │   ├── clickhouselisttables
│   │   │   │   ├── clickhouselisttables_test.go
│   │   │   │   └── clickhouselisttables.go
│   │   │   └── clickhousesql
│   │   │       ├── clickhousesql_test.go
│   │   │       └── clickhousesql.go
│   │   ├── cloudmonitoring
│   │   │   ├── cloudmonitoring_test.go
│   │   │   └── cloudmonitoring.go
│   │   ├── cloudsql
│   │   │   ├── cloudsqlcreatedatabase
│   │   │   │   ├── cloudsqlcreatedatabase_test.go
│   │   │   │   └── cloudsqlcreatedatabase.go
│   │   │   ├── cloudsqlcreateusers
│   │   │   │   ├── cloudsqlcreateusers_test.go
│   │   │   │   └── cloudsqlcreateusers.go
│   │   │   ├── cloudsqlgetinstances
│   │   │   │   ├── cloudsqlgetinstances_test.go
│   │   │   │   └── cloudsqlgetinstances.go
│   │   │   ├── cloudsqllistdatabases
│   │   │   │   ├── cloudsqllistdatabases_test.go
│   │   │   │   └── cloudsqllistdatabases.go
│   │   │   ├── cloudsqllistinstances
│   │   │   │   ├── cloudsqllistinstances_test.go
│   │   │   │   └── cloudsqllistinstances.go
│   │   │   └── cloudsqlwaitforoperation
│   │   │       ├── cloudsqlwaitforoperation_test.go
│   │   │       └── cloudsqlwaitforoperation.go
│   │   ├── cloudsqlmssql
│   │   │   └── cloudsqlmssqlcreateinstance
│   │   │       ├── cloudsqlmssqlcreateinstance_test.go
│   │   │       └── cloudsqlmssqlcreateinstance.go
│   │   ├── cloudsqlmysql
│   │   │   └── cloudsqlmysqlcreateinstance
│   │   │       ├── cloudsqlmysqlcreateinstance_test.go
│   │   │       └── cloudsqlmysqlcreateinstance.go
│   │   ├── cloudsqlpg
│   │   │   └── cloudsqlpgcreateinstances
│   │   │       ├── cloudsqlpgcreateinstances_test.go
│   │   │       └── cloudsqlpgcreateinstances.go
│   │   ├── common_test.go
│   │   ├── common.go
│   │   ├── couchbase
│   │   │   ├── couchbase_test.go
│   │   │   └── couchbase.go
│   │   ├── dataform
│   │   │   └── dataformcompilelocal
│   │   │       ├── dataformcompilelocal_test.go
│   │   │       └── dataformcompilelocal.go
│   │   ├── dataplex
│   │   │   ├── dataplexlookupentry
│   │   │   │   ├── dataplexlookupentry_test.go
│   │   │   │   └── dataplexlookupentry.go
│   │   │   ├── dataplexsearchaspecttypes
│   │   │   │   ├── dataplexsearchaspecttypes_test.go
│   │   │   │   └── dataplexsearchaspecttypes.go
│   │   │   └── dataplexsearchentries
│   │   │       ├── dataplexsearchentries_test.go
│   │   │       └── dataplexsearchentries.go
│   │   ├── dgraph
│   │   │   ├── dgraph_test.go
│   │   │   └── dgraph.go
│   │   ├── firebird
│   │   │   ├── firebirdexecutesql
│   │   │   │   ├── firebirdexecutesql_test.go
│   │   │   │   └── firebirdexecutesql.go
│   │   │   └── firebirdsql
│   │   │       ├── firebirdsql_test.go
│   │   │       └── firebirdsql.go
│   │   ├── firestore
│   │   │   ├── firestoreadddocuments
│   │   │   │   ├── firestoreadddocuments_test.go
│   │   │   │   └── firestoreadddocuments.go
│   │   │   ├── firestoredeletedocuments
│   │   │   │   ├── firestoredeletedocuments_test.go
│   │   │   │   └── firestoredeletedocuments.go
│   │   │   ├── firestoregetdocuments
│   │   │   │   ├── firestoregetdocuments_test.go
│   │   │   │   └── firestoregetdocuments.go
│   │   │   ├── firestoregetrules
│   │   │   │   ├── firestoregetrules_test.go
│   │   │   │   └── firestoregetrules.go
│   │   │   ├── firestorelistcollections
│   │   │   │   ├── firestorelistcollections_test.go
│   │   │   │   └── firestorelistcollections.go
│   │   │   ├── firestorequery
│   │   │   │   ├── firestorequery_test.go
│   │   │   │   └── firestorequery.go
│   │   │   ├── firestorequerycollection
│   │   │   │   ├── firestorequerycollection_test.go
│   │   │   │   └── firestorequerycollection.go
│   │   │   ├── firestoreupdatedocument
│   │   │   │   ├── firestoreupdatedocument_test.go
│   │   │   │   └── firestoreupdatedocument.go
│   │   │   ├── firestorevalidaterules
│   │   │   │   ├── firestorevalidaterules_test.go
│   │   │   │   └── firestorevalidaterules.go
│   │   │   └── util
│   │   │       ├── converter_test.go
│   │   │       ├── converter.go
│   │   │       ├── validator_test.go
│   │   │       └── validator.go
│   │   ├── http
│   │   │   ├── http_test.go
│   │   │   └── http.go
│   │   ├── http_method.go
│   │   ├── looker
│   │   │   ├── lookeradddashboardelement
│   │   │   │   ├── lookeradddashboardelement_test.go
│   │   │   │   └── lookeradddashboardelement.go
│   │   │   ├── lookercommon
│   │   │   │   ├── lookercommon_test.go
│   │   │   │   └── lookercommon.go
│   │   │   ├── lookerconversationalanalytics
│   │   │   │   ├── lookerconversationalanalytics_test.go
│   │   │   │   └── lookerconversationalanalytics.go
│   │   │   ├── lookercreateprojectfile
│   │   │   │   ├── lookercreateprojectfile_test.go
│   │   │   │   └── lookercreateprojectfile.go
│   │   │   ├── lookerdeleteprojectfile
│   │   │   │   ├── lookerdeleteprojectfile_test.go
│   │   │   │   └── lookerdeleteprojectfile.go
│   │   │   ├── lookerdevmode
│   │   │   │   ├── lookerdevmode_test.go
│   │   │   │   └── lookerdevmode.go
│   │   │   ├── lookergetdashboards
│   │   │   │   ├── lookergetdashboards_test.go
│   │   │   │   └── lookergetdashboards.go
│   │   │   ├── lookergetdimensions
│   │   │   │   ├── lookergetdimensions_test.go
│   │   │   │   └── lookergetdimensions.go
│   │   │   ├── lookergetexplores
│   │   │   │   ├── lookergetexplores_test.go
│   │   │   │   └── lookergetexplores.go
│   │   │   ├── lookergetfilters
│   │   │   │   ├── lookergetfilters_test.go
│   │   │   │   └── lookergetfilters.go
│   │   │   ├── lookergetlooks
│   │   │   │   ├── lookergetlooks_test.go
│   │   │   │   └── lookergetlooks.go
│   │   │   ├── lookergetmeasures
│   │   │   │   ├── lookergetmeasures_test.go
│   │   │   │   └── lookergetmeasures.go
│   │   │   ├── lookergetmodels
│   │   │   │   ├── lookergetmodels_test.go
│   │   │   │   └── lookergetmodels.go
│   │   │   ├── lookergetparameters
│   │   │   │   ├── lookergetparameters_test.go
│   │   │   │   └── lookergetparameters.go
│   │   │   ├── lookergetprojectfile
│   │   │   │   ├── lookergetprojectfile_test.go
│   │   │   │   └── lookergetprojectfile.go
│   │   │   ├── lookergetprojectfiles
│   │   │   │   ├── lookergetprojectfiles_test.go
│   │   │   │   └── lookergetprojectfiles.go
│   │   │   ├── lookergetprojects
│   │   │   │   ├── lookergetprojects_test.go
│   │   │   │   └── lookergetprojects.go
│   │   │   ├── lookerhealthanalyze
│   │   │   │   ├── lookerhealthanalyze_test.go
│   │   │   │   └── lookerhealthanalyze.go
│   │   │   ├── lookerhealthpulse
│   │   │   │   ├── lookerhealthpulse_test.go
│   │   │   │   └── lookerhealthpulse.go
│   │   │   ├── lookerhealthvacuum
│   │   │   │   ├── lookerhealthvacuum_test.go
│   │   │   │   └── lookerhealthvacuum.go
│   │   │   ├── lookermakedashboard
│   │   │   │   ├── lookermakedashboard_test.go
│   │   │   │   └── lookermakedashboard.go
│   │   │   ├── lookermakelook
│   │   │   │   ├── lookermakelook_test.go
│   │   │   │   └── lookermakelook.go
│   │   │   ├── lookerquery
│   │   │   │   ├── lookerquery_test.go
│   │   │   │   └── lookerquery.go
│   │   │   ├── lookerquerysql
│   │   │   │   ├── lookerquerysql_test.go
│   │   │   │   └── lookerquerysql.go
│   │   │   ├── lookerqueryurl
│   │   │   │   ├── lookerqueryurl_test.go
│   │   │   │   └── lookerqueryurl.go
│   │   │   ├── lookerrunlook
│   │   │   │   ├── lookerrunlook_test.go
│   │   │   │   └── lookerrunlook.go
│   │   │   └── lookerupdateprojectfile
│   │   │       ├── lookerupdateprojectfile_test.go
│   │   │       └── lookerupdateprojectfile.go
│   │   ├── mongodb
│   │   │   ├── mongodbaggregate
│   │   │   │   ├── mongodbaggregate_test.go
│   │   │   │   └── mongodbaggregate.go
│   │   │   ├── mongodbdeletemany
│   │   │   │   ├── mongodbdeletemany_test.go
│   │   │   │   └── mongodbdeletemany.go
│   │   │   ├── mongodbdeleteone
│   │   │   │   ├── mongodbdeleteone_test.go
│   │   │   │   └── mongodbdeleteone.go
│   │   │   ├── mongodbfind
│   │   │   │   ├── mongodbfind_test.go
│   │   │   │   └── mongodbfind.go
│   │   │   ├── mongodbfindone
│   │   │   │   ├── mongodbfindone_test.go
│   │   │   │   └── mongodbfindone.go
│   │   │   ├── mongodbinsertmany
│   │   │   │   ├── mongodbinsertmany_test.go
│   │   │   │   └── mongodbinsertmany.go
│   │   │   ├── mongodbinsertone
│   │   │   │   ├── mongodbinsertone_test.go
│   │   │   │   └── mongodbinsertone.go
│   │   │   ├── mongodbupdatemany
│   │   │   │   ├── mongodbupdatemany_test.go
│   │   │   │   └── mongodbupdatemany.go
│   │   │   └── mongodbupdateone
│   │   │       ├── mongodbupdateone_test.go
│   │   │       └── mongodbupdateone.go
│   │   ├── mssql
│   │   │   ├── mssqlexecutesql
│   │   │   │   ├── mssqlexecutesql_test.go
│   │   │   │   └── mssqlexecutesql.go
│   │   │   ├── mssqllisttables
│   │   │   │   ├── mssqllisttables_test.go
│   │   │   │   └── mssqllisttables.go
│   │   │   └── mssqlsql
│   │   │       ├── mssqlsql_test.go
│   │   │       └── mssqlsql.go
│   │   ├── mysql
│   │   │   ├── mysqlcommon
│   │   │   │   └── mysqlcommon.go
│   │   │   ├── mysqlexecutesql
│   │   │   │   ├── mysqlexecutesql_test.go
│   │   │   │   └── mysqlexecutesql.go
│   │   │   ├── mysqllistactivequeries
│   │   │   │   ├── mysqllistactivequeries_test.go
│   │   │   │   └── mysqllistactivequeries.go
│   │   │   ├── mysqllisttablefragmentation
│   │   │   │   ├── mysqllisttablefragmentation_test.go
│   │   │   │   └── mysqllisttablefragmentation.go
│   │   │   ├── mysqllisttables
│   │   │   │   ├── mysqllisttables_test.go
│   │   │   │   └── mysqllisttables.go
│   │   │   ├── mysqllisttablesmissinguniqueindexes
│   │   │   │   ├── mysqllisttablesmissinguniqueindexes_test.go
│   │   │   │   └── mysqllisttablesmissinguniqueindexes.go
│   │   │   └── mysqlsql
│   │   │       ├── mysqlsql_test.go
│   │   │       └── mysqlsql.go
│   │   ├── neo4j
│   │   │   ├── neo4jcypher
│   │   │   │   ├── neo4jcypher_test.go
│   │   │   │   └── neo4jcypher.go
│   │   │   ├── neo4jexecutecypher
│   │   │   │   ├── classifier
│   │   │   │   │   ├── classifier_test.go
│   │   │   │   │   └── classifier.go
│   │   │   │   ├── neo4jexecutecypher_test.go
│   │   │   │   └── neo4jexecutecypher.go
│   │   │   └── neo4jschema
│   │   │       ├── cache
│   │   │       │   ├── cache_test.go
│   │   │       │   └── cache.go
│   │   │       ├── helpers
│   │   │       │   ├── helpers_test.go
│   │   │       │   └── helpers.go
│   │   │       ├── neo4jschema_test.go
│   │   │       ├── neo4jschema.go
│   │   │       └── types
│   │   │           └── types.go
│   │   ├── oceanbase
│   │   │   ├── oceanbaseexecutesql
│   │   │   │   ├── oceanbaseexecutesql_test.go
│   │   │   │   └── oceanbaseexecutesql.go
│   │   │   └── oceanbasesql
│   │   │       ├── oceanbasesql_test.go
│   │   │       └── oceanbasesql.go
│   │   ├── oracle
│   │   │   ├── oracleexecutesql
│   │   │   │   └── oracleexecutesql.go
│   │   │   └── oraclesql
│   │   │       └── oraclesql.go
│   │   ├── parameters_test.go
│   │   ├── parameters.go
│   │   ├── postgres
│   │   │   ├── postgresexecutesql
│   │   │   │   ├── postgresexecutesql_test.go
│   │   │   │   └── postgresexecutesql.go
│   │   │   ├── postgreslistactivequeries
│   │   │   │   ├── postgreslistactivequeries_test.go
│   │   │   │   └── postgreslistactivequeries.go
│   │   │   ├── postgreslistavailableextensions
│   │   │   │   ├── postgreslistavailableextensions_test.go
│   │   │   │   └── postgreslistavailableextensions.go
│   │   │   ├── postgreslistinstalledextensions
│   │   │   │   ├── postgreslistinstalledextensions_test.go
│   │   │   │   └── postgreslistinstalledextensions.go
│   │   │   ├── postgreslisttables
│   │   │   │   ├── postgreslisttables_test.go
│   │   │   │   └── postgreslisttables.go
│   │   │   └── postgressql
│   │   │       ├── postgressql_test.go
│   │   │       └── postgressql.go
│   │   ├── redis
│   │   │   ├── redis_test.go
│   │   │   └── redis.go
│   │   ├── spanner
│   │   │   ├── spannerexecutesql
│   │   │   │   ├── spannerexecutesql_test.go
│   │   │   │   └── spannerexecutesql.go
│   │   │   ├── spannerlisttables
│   │   │   │   ├── spannerlisttables_test.go
│   │   │   │   └── spannerlisttables.go
│   │   │   └── spannersql
│   │   │       ├── spanner_test.go
│   │   │       └── spannersql.go
│   │   ├── sqlite
│   │   │   ├── sqliteexecutesql
│   │   │   │   ├── sqliteexecutesql_test.go
│   │   │   │   └── sqliteexecutesql.go
│   │   │   └── sqlitesql
│   │   │       ├── sqlitesql_test.go
│   │   │       └── sqlitesql.go
│   │   ├── tidb
│   │   │   ├── tidbexecutesql
│   │   │   │   ├── tidbexecutesql_test.go
│   │   │   │   └── tidbexecutesql.go
│   │   │   └── tidbsql
│   │   │       ├── tidbsql_test.go
│   │   │       └── tidbsql.go
│   │   ├── tools_test.go
│   │   ├── tools.go
│   │   ├── toolsets.go
│   │   ├── trino
│   │   │   ├── trinoexecutesql
│   │   │   │   ├── trinoexecutesql_test.go
│   │   │   │   └── trinoexecutesql.go
│   │   │   └── trinosql
│   │   │       ├── trinosql_test.go
│   │   │       └── trinosql.go
│   │   ├── utility
│   │   │   └── wait
│   │   │       ├── wait_test.go
│   │   │       └── wait.go
│   │   ├── valkey
│   │   │   ├── valkey_test.go
│   │   │   └── valkey.go
│   │   └── yugabytedbsql
│   │       ├── yugabytedbsql_test.go
│   │       └── yugabytedbsql.go
│   └── util
│       └── util.go
├── LICENSE
├── logo.png
├── main.go
├── MCP-TOOLBOX-EXTENSION.md
├── README.md
└── tests
    ├── alloydb
    │   ├── alloydb_integration_test.go
    │   └── alloydb_wait_for_operation_test.go
    ├── alloydbainl
    │   └── alloydb_ai_nl_integration_test.go
    ├── alloydbpg
    │   └── alloydb_pg_integration_test.go
    ├── auth.go
    ├── bigquery
    │   └── bigquery_integration_test.go
    ├── bigtable
    │   └── bigtable_integration_test.go
    ├── cassandra
    │   └── cassandra_integration_test.go
    ├── clickhouse
    │   └── clickhouse_integration_test.go
    ├── cloudmonitoring
    │   └── cloud_monitoring_integration_test.go
    ├── cloudsql
    │   ├── cloud_sql_create_database_test.go
    │   ├── cloud_sql_create_users_test.go
    │   ├── cloud_sql_get_instances_test.go
    │   ├── cloud_sql_list_databases_test.go
    │   ├── cloudsql_list_instances_test.go
    │   └── cloudsql_wait_for_operation_test.go
    ├── cloudsqlmssql
    │   ├── cloud_sql_mssql_create_instance_integration_test.go
    │   └── cloud_sql_mssql_integration_test.go
    ├── cloudsqlmysql
    │   ├── cloud_sql_mysql_create_instance_integration_test.go
    │   └── cloud_sql_mysql_integration_test.go
    ├── cloudsqlpg
    │   ├── cloud_sql_pg_create_instances_test.go
    │   └── cloud_sql_pg_integration_test.go
    ├── common.go
    ├── couchbase
    │   └── couchbase_integration_test.go
    ├── dataform
    │   └── dataform_integration_test.go
    ├── dataplex
    │   └── dataplex_integration_test.go
    ├── dgraph
    │   └── dgraph_integration_test.go
    ├── firebird
    │   └── firebird_integration_test.go
    ├── firestore
    │   └── firestore_integration_test.go
    ├── http
    │   └── http_integration_test.go
    ├── looker
    │   └── looker_integration_test.go
    ├── mongodb
    │   └── mongodb_integration_test.go
    ├── mssql
    │   └── mssql_integration_test.go
    ├── mysql
    │   └── mysql_integration_test.go
    ├── neo4j
    │   └── neo4j_integration_test.go
    ├── oceanbase
    │   └── oceanbase_integration_test.go
    ├── option.go
    ├── oracle
    │   └── oracle_integration_test.go
    ├── postgres
    │   └── postgres_integration_test.go
    ├── redis
    │   └── redis_test.go
    ├── server.go
    ├── source.go
    ├── spanner
    │   └── spanner_integration_test.go
    ├── sqlite
    │   └── sqlite_integration_test.go
    ├── tidb
    │   └── tidb_integration_test.go
    ├── tool.go
    ├── trino
    │   └── trino_integration_test.go
    ├── utility
    │   └── wait_integration_test.go
    ├── valkey
    │   └── valkey_test.go
    └── yugabytedb
        └── yugabytedb_integration_test.go
```

# Files

--------------------------------------------------------------------------------
/internal/tools/mysql/mysqllisttables/mysqllisttables.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mysqllisttables

import (
	"context"
	"database/sql"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmysql"
	"github.com/googleapis/genai-toolbox/internal/sources/mysql"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/mysql/mysqlcommon"
)

const kind string = "mysql-list-tables"

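// listTablesStatement powers this tool. The two '?' placeholders are bound
// into the session variables @table_names and @output_format via the CROSS
// JOIN below, so both values can be referenced from every correlated
// subquery. With @output_format = 'simple' each row carries only the table
// name; with 'detailed' it carries a JSON object describing the table's
// columns, constraints, indexes, and triggers. System schemas (mysql,
// information_schema, performance_schema, sys) are always excluded.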
const listTablesStatement = `
    SELECT
        T.TABLE_SCHEMA AS schema_name,
        T.TABLE_NAME AS object_name,
        CASE
            WHEN @output_format = 'simple' THEN
                JSON_OBJECT('name', T.TABLE_NAME)
            ELSE
                CONVERT(
                    JSON_OBJECT(
                        'schema_name', T.TABLE_SCHEMA,
                        'object_name', T.TABLE_NAME,
                        'object_type', 'TABLE',
                        'owner', (
                            SELECT
                                IFNULL(U.GRANTEE, 'N/A')
                            FROM
                                INFORMATION_SCHEMA.SCHEMA_PRIVILEGES U
                            WHERE
                                U.TABLE_SCHEMA = T.TABLE_SCHEMA
                            LIMIT 1
                        ),
                        'comment', IFNULL(T.TABLE_COMMENT, ''),
                        'columns', (
                            SELECT
                                IFNULL(
                                    JSON_ARRAYAGG(
                                        JSON_OBJECT(
                                            'column_name', C.COLUMN_NAME,
                                            'data_type', C.COLUMN_TYPE,
                                            'ordinal_position', C.ORDINAL_POSITION,
                                            'is_not_nullable', IF(C.IS_NULLABLE = 'NO', TRUE, FALSE),
                                            'column_default', C.COLUMN_DEFAULT,
                                            'column_comment', IFNULL(C.COLUMN_COMMENT, '')
                                        )
                                    ),
                                    JSON_ARRAY()
                                )
                            FROM
                                INFORMATION_SCHEMA.COLUMNS C
                            WHERE
                                C.TABLE_SCHEMA = T.TABLE_SCHEMA AND C.TABLE_NAME = T.TABLE_NAME
                            ORDER BY C.ORDINAL_POSITION
                        ),
                        'constraints', (
                            SELECT
                                IFNULL(
                                    JSON_ARRAYAGG(
                                        JSON_OBJECT(
                                            'constraint_name', TC.CONSTRAINT_NAME,
                                            'constraint_type',
                                            CASE TC.CONSTRAINT_TYPE
                                                WHEN 'PRIMARY KEY' THEN 'PRIMARY KEY'
                                                WHEN 'FOREIGN KEY' THEN 'FOREIGN KEY'
                                                WHEN 'UNIQUE' THEN 'UNIQUE'
                                                ELSE TC.CONSTRAINT_TYPE
                                            END,
                                            'constraint_definition', '',
                                            'constraint_columns', (
                                                SELECT
                                                    IFNULL(JSON_ARRAYAGG(KCU.COLUMN_NAME), JSON_ARRAY())
                                                FROM
                                                    INFORMATION_SCHEMA.KEY_COLUMN_USAGE KCU
                                                WHERE
                                                    KCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                                                    AND KCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                                                    AND KCU.TABLE_NAME = TC.TABLE_NAME
                                                ORDER BY KCU.ORDINAL_POSITION
                                            ),
                                            'foreign_key_referenced_table', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY', RC.REFERENCED_TABLE_NAME, NULL),
                                            'foreign_key_referenced_columns', IF(TC.CONSTRAINT_TYPE = 'FOREIGN KEY',
                                                (SELECT IFNULL(JSON_ARRAYAGG(FKCU.REFERENCED_COLUMN_NAME), JSON_ARRAY())
                                                FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE FKCU
                                                WHERE FKCU.CONSTRAINT_SCHEMA = TC.CONSTRAINT_SCHEMA
                                                    AND FKCU.CONSTRAINT_NAME = TC.CONSTRAINT_NAME
                                                    AND FKCU.TABLE_NAME = TC.TABLE_NAME
                                                    AND FKCU.REFERENCED_TABLE_NAME IS NOT NULL
                                                ORDER BY FKCU.ORDINAL_POSITION),
                                                NULL
                                            )
                                        )
                                    ),
                                    JSON_ARRAY()
                                )
                            FROM
                                INFORMATION_SCHEMA.TABLE_CONSTRAINTS TC
                            LEFT JOIN
                                INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS RC
                                ON TC.CONSTRAINT_SCHEMA = RC.CONSTRAINT_SCHEMA
                                AND TC.CONSTRAINT_NAME = RC.CONSTRAINT_NAME
                                AND TC.TABLE_NAME = RC.TABLE_NAME
                            WHERE
                                TC.TABLE_SCHEMA = T.TABLE_SCHEMA AND TC.TABLE_NAME = T.TABLE_NAME
                        ),
                        'indexes', (
                            SELECT
                                IFNULL(
                                    JSON_ARRAYAGG(
                                        JSON_OBJECT(
                                            'index_name', IndexData.INDEX_NAME,
                                            'is_unique', IF(IndexData.NON_UNIQUE = 0, TRUE, FALSE),
                                            'is_primary', IF(IndexData.INDEX_NAME = 'PRIMARY', TRUE, FALSE),
                                            'index_columns', IFNULL(IndexData.INDEX_COLUMNS_ARRAY, JSON_ARRAY())
                                        )
                                    ),
                                    JSON_ARRAY()
                                )
                            FROM (
                                SELECT
                                    S.TABLE_SCHEMA,
                                    S.TABLE_NAME,
                                    S.INDEX_NAME,
                                    MIN(S.NON_UNIQUE) AS NON_UNIQUE,
                                    JSON_ARRAYAGG(S.COLUMN_NAME) AS INDEX_COLUMNS_ARRAY
                                FROM
                                    INFORMATION_SCHEMA.STATISTICS S
                                GROUP BY
                                    S.TABLE_SCHEMA, S.TABLE_NAME, S.INDEX_NAME
                            ) AS IndexData
                            WHERE IndexData.TABLE_SCHEMA = T.TABLE_SCHEMA AND IndexData.TABLE_NAME = T.TABLE_NAME
                            ORDER BY IndexData.INDEX_NAME
                        ),
                        'triggers', (
                            SELECT
                                IFNULL(
                                    JSON_ARRAYAGG(
                                        JSON_OBJECT(
                                            'trigger_name', TR.TRIGGER_NAME,
                                            'trigger_definition', TR.ACTION_STATEMENT
                                        )
                                    ),
                                    JSON_ARRAY()
                                )
                            FROM
                                INFORMATION_SCHEMA.TRIGGERS TR
                            WHERE
                                TR.EVENT_OBJECT_SCHEMA = T.TABLE_SCHEMA AND TR.EVENT_OBJECT_TABLE = T.TABLE_NAME
                            ORDER BY TR.TRIGGER_NAME
                        )
                    )
                USING utf8mb4)
        END AS object_details
    FROM
        INFORMATION_SCHEMA.TABLES T
    CROSS JOIN (SELECT @table_names := ?, @output_format := ?) AS variables
    WHERE
        T.TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys')
        AND (NULLIF(TRIM(@table_names), '') IS NULL OR FIND_IN_SET(T.TABLE_NAME, @table_names))
        AND T.TABLE_TYPE = 'BASE TABLE'
    ORDER BY
        T.TABLE_SCHEMA, T.TABLE_NAME;
`

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type compatibleSource interface {
	MySQLPool() *sql.DB
}

// validate compatible sources are still compatible
var _ compatibleSource = &cloudsqlmysql.Source{}
var _ compatibleSource = &mysql.Source{}

var compatibleSources = [...]string{cloudsqlmysql.SourceKind, mysql.SourceKind}

type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameterWithDefault("table_names", "", "Optional: A comma-separated list of table names. If empty, details for all tables will be listed."),
		tools.NewStringParameterWithDefault("output_format", "detailed", "Optional: Use 'simple' for names only or 'detailed' for full info."),
	}
	paramManifest := allParameters.Manifest()
	mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters)

	// finish tool setup
	t := Tool{
		Name:         cfg.Name,
		Kind:         kind,
		AllParams:    allParameters,
		AuthRequired: cfg.AuthRequired,
		Pool:         s.MySQLPool(),
		manifest:     tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
		mcpManifest:  mcpManifest,
	}
	return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
	Name         string           `yaml:"name"`
	Kind         string           `yaml:"kind"`
	AuthRequired []string         `yaml:"authRequired"`
	AllParams    tools.Parameters `yaml:"allParams"`

	Pool        *sql.DB
	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	tableNames, ok := paramsMap["table_names"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid 'table_names' parameter; expected a string")
	}
	outputFormat, _ := paramsMap["output_format"].(string)
	if outputFormat != "simple" && outputFormat != "detailed" {
		return nil, fmt.Errorf("invalid value for output_format: must be 'simple' or 'detailed', but got %q", outputFormat)
	}

	results, err := t.Pool.QueryContext(ctx, listTablesStatement, tableNames, outputFormat)
	if err != nil {
		return nil, fmt.Errorf("unable to execute query: %w", err)
	}

	// close the result set even if a later step fails
	defer results.Close()

	cols, err := results.Columns()
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve column names: %w", err)
	}

	// create an array of values for each column, which can be re-used to scan each row
	rawValues := make([]any, len(cols))
	values := make([]any, len(cols))
	for i := range rawValues {
		values[i] = &rawValues[i]
	}

	colTypes, err := results.ColumnTypes()
	if err != nil {
		return nil, fmt.Errorf("unable to get column types: %w", err)
	}

	var out []any
	for results.Next() {
		err := results.Scan(values...)
		if err != nil {
			return nil, fmt.Errorf("unable to parse row: %w", err)
		}
		vMap := make(map[string]any)
		for i, name := range cols {
			val := rawValues[i]
			if val == nil {
				vMap[name] = nil
				continue
			}

			vMap[name], err = mysqlcommon.ConvertToType(colTypes[i], val)
			if err != nil {
				return nil, fmt.Errorf("errors encountered when converting values: %w", err)
			}
		}
		out = append(out, vMap)
	}

	if err := results.Err(); err != nil {
		return nil, fmt.Errorf("errors encountered during row iteration: %w", err)
	}

	return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return false
}

```

--------------------------------------------------------------------------------
/internal/tools/looker/lookerhealthpulse/lookerhealthpulse.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lookerhealthpulse

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	lookersrc "github.com/googleapis/genai-toolbox/internal/sources/looker"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/looker/lookercommon"
	"github.com/googleapis/genai-toolbox/internal/util"

	"github.com/looker-open-source/sdk-codegen/go/rtl"
	v4 "github.com/looker-open-source/sdk-codegen/go/sdk/v4"
)

// =================================================================================================================
// START MCP SERVER CORE LOGIC
// =================================================================================================================
const kind string = "looker-health-pulse"

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type Config struct {
	Name         string         `yaml:"name" validate:"required"`
	Kind         string         `yaml:"kind" validate:"required"`
	Source       string         `yaml:"source" validate:"required"`
	Description  string         `yaml:"description" validate:"required"`
	AuthRequired []string       `yaml:"authRequired"`
	Parameters   map[string]any `yaml:"parameters"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(*lookersrc.Source)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be `looker`", kind)
	}

	actionParameter := tools.NewStringParameterWithRequired("action", "The health check to run. Can be either: `check_db_connections`, `check_dashboard_performance`, `check_dashboard_errors`, `check_explore_performance`, `check_schedule_failures`, or `check_legacy_features`", true)

	parameters := tools.Parameters{
		actionParameter,
	}

	mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, parameters)

	// finish tool setup
	return Tool{
		Name:           cfg.Name,
		Kind:           kind,
		Parameters:     parameters,
		AuthRequired:   cfg.AuthRequired,
		UseClientOAuth: s.UseClientOAuth,
		Client:         s.Client,
		ApiSettings:    s.ApiSettings,
		manifest: tools.Manifest{
			Description:  cfg.Description,
			Parameters:   parameters.Manifest(),
			AuthRequired: cfg.AuthRequired,
		},
		mcpManifest: mcpManifest,
	}, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
	Name           string `yaml:"name"`
	Kind           string `yaml:"kind"`
	UseClientOAuth bool
	Client         *v4.LookerSDK
	ApiSettings    *rtl.ApiSettings
	AuthRequired   []string         `yaml:"authRequired"`
	Parameters     tools.Parameters `yaml:"parameters"`
	manifest       tools.Manifest
	mcpManifest    tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}

	sdk, err := lookercommon.GetLookerSDK(t.UseClientOAuth, t.ApiSettings, t.Client, accessToken)
	if err != nil {
		return nil, fmt.Errorf("error getting sdk: %w", err)
	}

	pulseTool := &pulseTool{
		ApiSettings: t.ApiSettings,
		SdkClient:   sdk,
	}

	paramsMap := params.AsMap()
	action, ok := paramsMap["action"].(string)
	if !ok {
		return nil, fmt.Errorf("action parameter not found")
	}

	pulseParams := PulseParams{
		Action: action,
	}

	result, err := pulseTool.RunPulse(ctx, pulseParams)
	if err != nil {
		return nil, fmt.Errorf("error running pulse: %w", err)
	}

	logger.DebugContext(ctx, "result = ", result)

	return result, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.Parameters, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return t.UseClientOAuth
}

// =================================================================================================================
// END MCP SERVER CORE LOGIC
// =================================================================================================================

// =================================================================================================================
// START LOOKER HEALTH PULSE CORE LOGIC
// =================================================================================================================
type PulseParams struct {
	Action string
	// Optionally add more parameters if needed
}

// pulseTool holds Looker API settings and client
type pulseTool struct {
	ApiSettings *rtl.ApiSettings
	SdkClient   *v4.LookerSDK
}

func (t *pulseTool) RunPulse(ctx context.Context, params PulseParams) (interface{}, error) {
	switch params.Action {
	case "check_db_connections":
		return t.checkDBConnections(ctx)
	case "check_dashboard_performance":
		return t.checkDashboardPerformance(ctx)
	case "check_dashboard_errors":
		return t.checkDashboardErrors(ctx)
	case "check_explore_performance":
		return t.checkExplorePerformance(ctx)
	case "check_schedule_failures":
		return t.checkScheduleFailures(ctx)
	case "check_legacy_features":
		return t.checkLegacyFeatures(ctx)
	default:
		return nil, fmt.Errorf("unknown action: %s", params.Action)
	}
}

// Check DB connections and run tests
func (t *pulseTool) checkDBConnections(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 1/6: Checking connections")

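	// Looker's internal connections are excluded from the health check, since
	// they are not user-managed.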
	reservedNames := map[string]struct{}{
		"looker__internal__analytics__replica": {},
		"looker__internal__analytics":          {},
		"looker":                               {},
		"looker__ilooker":                      {},
	}

	connections, err := t.SdkClient.AllConnections("", t.ApiSettings)
	if err != nil {
		return nil, fmt.Errorf("error fetching connections: %w", err)
	}

	var filteredConnections []v4.DBConnection
	for _, c := range connections {
		if _, reserved := reservedNames[*c.Name]; !reserved {
			filteredConnections = append(filteredConnections, c)
		}
	}
	if len(filteredConnections) == 0 {
		return nil, fmt.Errorf("no connections found")
	}

	var results []map[string]interface{}
	for _, conn := range filteredConnections {
		var errors []string
		// Test connection (simulate test_connection endpoint)
		resp, err := t.SdkClient.TestConnection(*conn.Name, nil, t.ApiSettings)
		if err != nil {
			errors = append(errors, "API JSONDecode Error")
		} else {
			for _, r := range resp {
				if *r.Status == "error" {
					errors = append(errors, *r.Message)
				}
			}
		}

		// Run inline query for connection activity
		limit := "1"
		query := &v4.WriteQuery{
			Model:  "system__activity",
			View:   "history",
			Fields: &[]string{"history.query_run_count"},
			Filters: &map[string]any{
				"history.connection_name": *conn.Name,
				"history.created_date":    "90 days",
				"user.dev_branch_name":    "NULL",
			},
			Limit: &limit,
		}
		raw, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
		if err != nil {
			return nil, err
		}
		var queryRunCount interface{}
		var data []map[string]interface{}
		_ = json.Unmarshal([]byte(raw), &data)
		if len(data) > 0 {
			queryRunCount = data[0]["history.query_run_count"]
		}

		results = append(results, map[string]interface{}{
			"Connection":  *conn.Name,
			"Status":      "OK",
			"Errors":      errors,
			"Query Count": queryRunCount,
		})
	}
	return results, nil
}

func (t *pulseTool) checkDashboardPerformance(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 2/6: Checking for dashboards with queries slower than 30 seconds in the last 7 days")

	limit := "20"
	query := &v4.WriteQuery{
		Model:  "system__activity",
		View:   "history",
		Fields: &[]string{"dashboard.title", "query.count"},
		Filters: &map[string]any{
			"history.created_date": "7 days",
			"history.real_dash_id": "-NULL",
			"history.runtime":      ">30",
			"history.status":       "complete",
		},
		Sorts: &[]string{"query.count desc"},
		Limit: &limit,
	}
	raw, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
	if err != nil {
		return nil, err
	}
	var dashboards []map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &dashboards); err != nil {
		return nil, err
	}
	return dashboards, nil
}

func (t *pulseTool) checkDashboardErrors(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 3/6: Checking for dashboards with erroring queries in the last 7 days")

	limit := "20"
	query := &v4.WriteQuery{
		Model:  "system__activity",
		View:   "history",
		Fields: &[]string{"dashboard.title", "history.query_run_count"},
		Filters: &map[string]any{
			"dashboard.title":           "-NULL",
			"history.created_date":      "7 days",
			"history.dashboard_session": "-NULL",
			"history.status":            "error",
		},
		Sorts: &[]string{"history.query_run_count desc"},
		Limit: &limit,
	}
	raw, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
	if err != nil {
		return nil, err
	}
	var dashboards []map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &dashboards); err != nil {
		return nil, err
	}
	return dashboards, nil
}

func (t *pulseTool) checkExplorePerformance(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 4/6: Checking for the slowest explores in the past 7 days")

	limit := "20"
	query := &v4.WriteQuery{
		Model:  "system__activity",
		View:   "history",
		Fields: &[]string{"query.model", "query.view", "history.average_runtime"},
		Filters: &map[string]any{
			"history.created_date": "7 days",
			"query.model":          "-NULL, -system^_^_activity",
		},
		Sorts: &[]string{"history.average_runtime desc"},
		Limit: &limit,
	}
	raw, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
	if err != nil {
		return nil, err
	}
	var explores []map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &explores); err != nil {
		return nil, err
	}

	// Average query runtime
	query.Fields = &[]string{"history.average_runtime"}
	rawAvg, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
	if err != nil {
		return nil, err
	}
	var avgData []map[string]interface{}
	if err := json.Unmarshal([]byte(rawAvg), &avgData); err == nil {
		if len(avgData) > 0 {
			if avgRuntime, ok := avgData[0]["history.average_runtime"].(float64); ok {
				logger.InfoContext(ctx, fmt.Sprintf("For context, the average query runtime is %.4fs", avgRuntime))
			}
		}
	}
	return explores, nil
}

func (t *pulseTool) checkScheduleFailures(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 5/6: Checking for failing schedules")

	limit := "500"
	query := &v4.WriteQuery{
		Model:  "system__activity",
		View:   "scheduled_plan",
		Fields: &[]string{"scheduled_job.name", "scheduled_job.count"},
		Filters: &map[string]any{
			"scheduled_job.created_date": "7 days",
			"scheduled_job.status":       "failure",
		},
		Sorts: &[]string{"scheduled_job.count desc"},
		Limit: &limit,
	}
	raw, err := lookercommon.RunInlineQuery(ctx, t.SdkClient, query, "json", t.ApiSettings)
	if err != nil {
		return nil, err
	}
	var schedules []map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &schedules); err != nil {
		return nil, err
	}
	return schedules, nil
}

func (t *pulseTool) checkLegacyFeatures(ctx context.Context) (interface{}, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return nil, fmt.Errorf("unable to get logger from ctx: %s", err)
	}
	logger.InfoContext(ctx, "Test 6/6: Checking for enabled legacy features")

	features, err := t.SdkClient.AllLegacyFeatures(t.ApiSettings)
	if err != nil {
		if strings.Contains(err.Error(), "Unsupported in Looker (Google Cloud core)") {
			return []map[string]string{{"Feature": "Unsupported in Looker (Google Cloud core)"}}, nil
		}
		logger.ErrorContext(ctx, err.Error())
		return []map[string]string{{"Feature": "Unable to pull legacy features due to SDK error"}}, nil
	}
	var legacyFeatures []map[string]string
	for _, f := range features {
		if *f.Enabled {
			legacyFeatures = append(legacyFeatures, map[string]string{"Feature": *f.Name})
		}
	}
	return legacyFeatures, nil
}

// =================================================================================================================
// END LOOKER HEALTH PULSE CORE LOGIC
// =================================================================================================================

```

--------------------------------------------------------------------------------
/DEVELOPER.md:
--------------------------------------------------------------------------------

```markdown
# DEVELOPER.md

This document provides instructions for setting up your development environment
and contributing to the Toolbox project.

## Prerequisites

Before you begin, ensure you have the following:

1. **Databases:** Set up the necessary databases for your development
   environment.
1. **Go:** Install the latest version of [Go](https://go.dev/doc/install).
1. **Dependencies:** Download and manage project dependencies:

    ```bash
    go get
    go mod tidy
    ```

## Developing Toolbox

### Running from Local Source

1. **Configuration:** Create a `tools.yaml` file to configure your sources and
   tools. See the [Configuration section in the
   README](./README.md#Configuration) for details.
1. **CLI Flags:** List available command-line flags for the Toolbox server:

    ```bash
    go run . --help
    ```

1. **Running the Server:** Start the Toolbox server with optional flags. The
   server listens on port 5000 by default.

    ```bash
    go run .
    ```

1. **Testing the Endpoint:** Verify the server is running by sending a request
   to the endpoint:

    ```bash
    curl http://127.0.0.1:5000
    ```

### Tool Naming Conventions

This section details the purpose and conventions for MCP Toolbox's tools naming
properties, **tool name** and **tool kind**.

```
cancel_hotel: <- tool name
    kind: postgres-sql  <- tool kind
    source: my_pg_source
```

#### Tool Name

Tool name is the identifier used by a Large Language Model (LLM) to invoke a
specific tool.

* Custom tools: The user can define any name they want. The below guidelines
  do not apply.
* Pre-built tools: The tool name is predefined and cannot be changed. It
  should follow the guidelines below.

The following guidelines apply to tool names:

* Should use underscores over hyphens (e.g., `list_collections` instead of
  `list-collections`).
* Should not have the product name in the name (e.g., `list_collections` instead
  of `firestore_list_collections`).
* Superficial changes (e.g., changing a tool name) are NOT considered breaking.
* Non-superficial changes (e.g., adding new parameters to a function) MAY be
  considered breaking until they can be validated through extensive testing to
  ensure they do not negatively impact agent performance.

#### Tool Kind

Tool kind serves as a category or type that a user can assign to a tool.

The following guidelines apply to tool kinds:

* Should use hyphens over underscores (e.g., `firestore-list-collections`
  instead of `firestore_list_collections`).
* Should use the product name in the name (e.g., `firestore-list-collections`
  over `list-collections`).
* Changes to tool kind are breaking changes and should be avoided.
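
Putting both conventions together, a minimal custom-tool entry might look like
the following (the tool, kind, and source names here are illustrative):

```
search_hotels:            <- tool name: any custom name works
    kind: firestore-query-collection  <- tool kind: hyphens + product name
    source: my_firestore_source
```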

## Testing

### Infrastructure

Toolbox uses both GitHub Actions and Cloud Build to run test workflows. Cloud
Build is used when Google credentials are required. Cloud Build uses test
project "toolbox-testing-438616".

### Linting

Run the lint check to ensure code quality:

```bash
golangci-lint run --fix
```

### Unit Tests

Execute unit tests locally:

```bash
go test -race -v ./...
```

### Integration Tests

#### Running Locally

1. **Environment Variables:** Set the required environment variables (see the
   example after this list). Refer to the [Cloud Build testing
   configuration](./.ci/integration.cloudbuild.yaml) for a complete list of
   variables for each source.
    * `SERVICE_ACCOUNT_EMAIL`: Use your own GCP email.
    * `CLIENT_ID`: Use the Google Cloud SDK application Client ID. Contact
      Toolbox maintainers if you don't have it.
1. **Running Tests:** Run the integration test for your target source. Specify
   the required Go build tags at the top of each integration test file.

    ```shell
    go test -race -v ./tests/<YOUR_TEST_DIR>
    ```

    For example, to run the AlloyDB integration test:

    ```shell
    go test -race -v ./tests/alloydbpg
    ```

1. **Timeout:** The integration test should have a timeout on the server.
   Look for code like this:

   ```go
   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
   defer cancel()

   cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
   if err != nil {
     t.Fatalf("command initialization returned an error: %s", err)
   }
   defer cleanup()
   ```

   Be sure to set the timeout to a reasonable value for your tests.
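
For example, a local run against the AlloyDB tests might look like the
following; the values are placeholders, and the full set of required variables
depends on the source under test:

```bash
export SERVICE_ACCOUNT_EMAIL="you@example.com"
export CLIENT_ID="<client-id>"

go test -race -v ./tests/alloydbpg
```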

#### Running on Pull Requests

* **Internal Contributors:** Testing workflows should trigger automatically.
* **External Contributors:** Request Toolbox maintainers to trigger the testing
  workflows on your PR.

#### Test Resources

The following databases have been added as test resources. To add a new database
to test against, please contact the Toolbox maintainer team via an issue or PR.
Refer to the [Cloud Build testing
configuration](./.ci/integration.cloudbuild.yaml) for a complete list of
variables for each source.

* AlloyDB - setup in the test project
  * AI Natural Language ([setup
    instructions](https://cloud.google.com/alloydb/docs/ai/use-natural-language-generate-sql-queries))
    has been configured for `alloydb-ai-nl` tool tests
  * The Cloud Build service account is a user
* Bigtable - setup in the test project
  * The Cloud Build service account is a user
* BigQuery - setup in the test project
  * The Cloud Build service account is a user
* Cloud SQL Postgres - setup in the test project
  * The Cloud Build service account is a user
* Cloud SQL MySQL - setup in the test project
  * The Cloud Build service account is a user
* Cloud SQL SQL Server - setup in the test project
  * The Cloud Build service account is a user
* Couchbase - setup in the test project via the Marketplace
* DGraph - using the public dgraph interface <https://play.dgraph.io> for
  testing
* Memorystore Redis - setup in the test project using a Memorystore for Redis
  standalone instance
  * Memorystore Redis Cluster, Memorystore Valkey standalone, and Memorystore
    Valkey Cluster instances all require PSC connections, which requires extra
    security setup to connect from Cloud Build. Memorystore Redis standalone is
    the only one allowing PSA connection.
  * The Cloud Build service account is a user
* Memorystore Valkey - setup in the test project using a Memorystore for Redis
  standalone instance
  * The Cloud Build service account is a user
* MySQL - setup in the test project using a Cloud SQL instance
* Neo4j - setup in the test project on a GCE VM
* Postgres - setup in the test project using an AlloyDB instance
* Spanner - setup in the test project
  * The Cloud Build service account is a user
* SQL Server - setup in the test project using a Cloud SQL instance
* SQLite - setup in the integration test, where we create a temporary database
  file

### Other GitHub Checks

* License header check (`.github/header-checker-lint.yml`) - Ensures files have
  the appropriate license
* CLA/google - Ensures the developer has signed the CLA:
  <https://cla.developers.google.com/>
* conventionalcommits.org - Ensures the commit messages are in the correct
  format. This repository uses tool [Release
  Please](https://github.com/googleapis/release-please) to create GitHub
  releases. It does so by parsing your git history, looking for [Conventional
  Commit messages](https://www.conventionalcommits.org/), and creating release
  PRs. Learn more by reading [How should I write my
  commits?](https://github.com/googleapis/release-please?tab=readme-ov-file#how-should-i-write-my-commits)
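
For reference, commit messages in this format look like the following
(illustrative examples):

```bash
git commit -m "feat: add a list-tables tool for MySQL"
git commit -m "fix(server): return 401 for unauthorized tool invocations"
```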

## Developing Documentation

### Running a Local Hugo Server

Follow these steps to preview documentation changes locally using a Hugo server:

1. **Install Hugo:** Ensure you have
   [Hugo](https://gohugo.io/installation/macos/) extended edition version
   0.146.0 or later installed.
1. **Navigate to the Hugo Directory:**

    ```bash
    cd .hugo
    ```

1. **Install Dependencies:**

    ```bash
    npm ci
    ```

1. **Start the Server:**

    ```bash
    hugo server
    ```

### Document Versioning Setup

There are 3 GHA workflows we use to achieve document versioning:

1. **Deploy In-development Docs:**
    This workflow runs on every commit merged into the main branch. It deploys
    the built site to the `/dev/` subdirectory for the in-development
    documentation.

1. **Deploy Versioned Docs:**
    When a new GitHub Release is published, this workflow performs two
    deployments based on the new release tag: one to the new version
    subdirectory and one to the root directory of the versioned-gh-pages
    branch.

    **Note:** Before the release PR from release-please is merged, add the
    newest version to the `hugo.toml` file (see the sketch after this list).

1. **Deploy Previous Version Docs:**
    This is a manual workflow, started from the GitHub Actions UI. It rebuilds
    and redeploys documentation for versions that were released before this
    system was in place. Start it from the UI by providing the git version tag
    you want to build the documentation for. The specific versioned
    subdirectory and the root docs are updated on the versioned-gh-pages
    branch.
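
For the note in step 2, the version entry in `hugo.toml` typically follows the
Docsy versioning convention. A sketch (the version number is illustrative;
check the parameter names against the existing file):

```toml
[[params.versions]]
  version = "v0.15.0"
  url = "/v0.15.0/"
```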

### Previewing Documentation on Pull Requests

#### Contributors

Request a repo owner to run the preview deployment workflow on your PR. A
preview link will be automatically added as a comment to your PR.

#### Maintainers

1. **Inspect Changes:** Review the proposed changes in the PR to ensure they are
   safe and do not contain malicious code. Pay close attention to changes in the
   `.github/workflows/` directory.
1. **Deploy Preview:** Apply the `docs: deploy-preview` label to the PR to
   deploy a documentation preview.

## Building Toolbox

### Building the Binary

1. **Build Command:** Compile the Toolbox binary:

    ```bash
    go build -o toolbox
    ```

1. **Running the Binary:** Execute the compiled binary with optional flags. The
   server listens on port 5000 by default:

    ```bash
    ./toolbox
    ```

1. **Testing the Endpoint:** Verify the server is running by sending a request
   to the endpoint:

    ```bash
    curl http://127.0.0.1:5000
    ```

### Building Container Images

1. **Build Command:** Build the Toolbox container image:

    ```bash
    docker build -t toolbox:dev .
    ```

1. **View Image:** List available Docker images to confirm the build:

    ```bash
    docker images
    ```

1. **Run Container:** Run the Toolbox container image using Docker:

    ```bash
    docker run -d toolbox:dev
    ```
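
    To reach the server from the host, you may want to publish the port it
    listens on (5000 by default):

    ```bash
    docker run -d -p 5000:5000 toolbox:dev
    ```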

## Developing Toolbox SDKs

Refer to the [SDK developer
guide](https://github.com/googleapis/mcp-toolbox-sdk-python/blob/main/DEVELOPER.md)
for instructions on developing Toolbox SDKs.

## Maintainer Information

### Team

The team `@googleapis/senseai-eco` has been set as
[CODEOWNERS](.github/CODEOWNERS). The GitHub TeamSync tool is used to create
this team from the MDB Group `senseai-eco`.

### Releasing

Toolbox has two types of releases: versioned and continuous. It uses the
Google Cloud project `database-toolbox`.

* **Versioned Release:** Official, supported distributions tagged as `latest`.
  The release process is defined in
  [versioned.release.cloudbuild.yaml](.ci/versioned.release.cloudbuild.yaml).
* **Continuous Release:** Used for early testing of features between official
  releases and for end-to-end testing. The release process is defined in
  [continuous.release.cloudbuild.yaml](.ci/continuous.release.cloudbuild.yaml).
* **GitHub Release:** `.github/release-please.yml` automatically creates GitHub
  Releases and release PRs.

### How-to Release a new Version

1. [Optional] If you want to override the version number, send a
   [PR](https://github.com/googleapis/genai-toolbox/pull/31) to trigger
   [release-please](https://github.com/googleapis/release-please?tab=readme-ov-file#how-do-i-change-the-version-number).
   You can generate a commit with the following line: `git commit -m "chore:
   release 0.1.0" -m "Release-As: 0.1.0" --allow-empty`
1. [Optional] If you want to edit the changelog, send commits to the release PR
1. Approve and merge the PR with the title “[chore(main): release
   x.x.x](https://github.com/googleapis/genai-toolbox/pull/16)”
1. The
   [trigger](https://pantheon.corp.google.com/cloud-build/triggers;region=us-central1/edit/27bd0d21-264a-4446-b2d7-0df4e9915fb3?e=13802955&inv=1&invt=AbhU8A&mods=logs_tg_staging&project=database-toolbox)
   should automatically run when a new tag is pushed. You can view [triggered
   builds here to check the
   status](https://pantheon.corp.google.com/cloud-build/builds;region=us-central1?query=trigger_id%3D%2227bd0d21-264a-4446-b2d7-0df4e9915fb3%22&e=13802955&inv=1&invt=AbhU8A&mods=logs_tg_staging&project=database-toolbox)
1. Update the GitHub release notes to include the following table:
    1. Run the following command (from the root directory):

        ```bash
        export VERSION="v0.0.0"
        .ci/generate_release_table.sh
        ```

    1. Copy the table output
    1. In the GitHub UI, navigate to Releases and click the `edit` button.
    1. Paste the table at the bottom of the release notes and click `Update release`.
1. Post release in internal chat and on Discord.

#### Supported Binaries

The following operating systems and architectures are supported for binary
releases:

* linux/amd64
* darwin/arm64
* darwin/amd64
* windows/amd64

#### Supported Container Images

The following base container images are supported for container image releases:

* distroless

### Automated Tests

Integration and unit tests are automatically triggered via Cloud Build on each
pull request. Integration tests run on merge and nightly.

#### Failure notifications

Failing on-merge and nightly tests send notifications via the Cloud Build
Failure Reporter [GitHub Actions
Workflow](.github/workflows/schedule_reporter.yml).

#### Trigger Setup

Configure a Cloud Build trigger using the UI or `gcloud` with the following
settings:

* **Event:** Pull request
* **Region:** global (for default worker pools)
* **Source:**
  * Generation: 1st gen
  * Repo: googleapis/genai-toolbox (GitHub App)
  * Base branch: `^main$`
* **Comment control:** Required except for owners and collaborators
* **Filters:** Add directory filter
* **Config:** Cloud Build configuration file
  * Location: Repository (add path to file)
* **Service account:** Set for demo service to enable ID token creation for
  authenticated services
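
As a sketch, an equivalent `gcloud` command might look like the following; the
trigger name and file paths are illustrative, and the flags should be verified
against your `gcloud` version:

```bash
gcloud builds triggers create github \
  --name="mysource-pr-test" \
  --repo-owner="googleapis" \
  --repo-name="genai-toolbox" \
  --pull-request-pattern='^main$' \
  --comment-control="COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY" \
  --build-config=".ci/integration.cloudbuild.yaml" \
  --included-files="internal/sources/mysource/**" \
  --region="global"
```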

### Triggering Tests

Trigger pull request tests for external contributors by:

* **Cloud Build tests:** Comment `/gcbrun`
* **Unit tests:** Add the `tests:run` label

## Repo Setup & Automation

* .github/blunderbuss.yml - Auto-assign issues and PRs from GitHub teams
* .github/renovate.json5 - Tooling for dependency updates. Dependabot is built
  into the GitHub repo for GitHub security warnings
* go/github-issue-mirror - GitHub issues are automatically mirrored into buganizer
* (Suspended) .github/sync-repo-settings.yaml - configure repo settings
* .github/release-please.yml - Creates GitHub releases
* .github/ISSUE_TEMPLATE - templates for GitHub issues

```

--------------------------------------------------------------------------------
/internal/server/mcp.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/go-chi/render"
	"github.com/google/uuid"
	"github.com/googleapis/genai-toolbox/internal/server/mcp"
	"github.com/googleapis/genai-toolbox/internal/server/mcp/jsonrpc"
	mcputil "github.com/googleapis/genai-toolbox/internal/server/mcp/util"
	v20241105 "github.com/googleapis/genai-toolbox/internal/server/mcp/v20241105"
	v20250326 "github.com/googleapis/genai-toolbox/internal/server/mcp/v20250326"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/util"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/metric"
)

type sseSession struct {
	writer     http.ResponseWriter
	flusher    http.Flusher
	done       chan struct{}
	eventQueue chan string
	lastActive time.Time
}

// sseManager manages and controls access to SSE sessions
type sseManager struct {
	mu          sync.Mutex
	sseSessions map[string]*sseSession
}

func (m *sseManager) get(id string) (*sseSession, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	session, ok := m.sseSessions[id]
	if !ok {
		return nil, false
	}
	session.lastActive = time.Now()
	return session, true
}

func newSseManager(ctx context.Context) *sseManager {
	sseM := &sseManager{
		mu:          sync.Mutex{},
		sseSessions: make(map[string]*sseSession),
	}
	go sseM.cleanupRoutine(ctx)
	return sseM
}

func (m *sseManager) add(id string, session *sseSession) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.sseSessions[id] = session
	session.lastActive = time.Now()
}

func (m *sseManager) remove(id string) {
	m.mu.Lock()
	delete(m.sseSessions, id)
	m.mu.Unlock()
}

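// cleanupRoutine periodically evicts sessions that have been idle for longer
// than the timeout, so abandoned SSE connections do not accumulate.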
func (m *sseManager) cleanupRoutine(ctx context.Context) {
	timeout := 10 * time.Minute
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			func() {
				m.mu.Lock()
				defer m.mu.Unlock()
				now := time.Now()
				for id, sess := range m.sseSessions {
					if now.Sub(sess.lastActive) > timeout {
						delete(m.sseSessions, id)
					}
				}
			}()
		}
	}
}

type stdioSession struct {
	protocol string
	server   *Server
	reader   *bufio.Reader
	writer   io.Writer
}

func NewStdioSession(s *Server, stdin io.Reader, stdout io.Writer) *stdioSession {
	stdioSession := &stdioSession{
		server: s,
		reader: bufio.NewReader(stdin),
		writer: stdout,
	}
	return stdioSession
}

func (s *stdioSession) Start(ctx context.Context) error {
	return s.readInputStream(ctx)
}

// readInputStream reads requests/notifications from MCP clients through stdin
func (s *stdioSession) readInputStream(ctx context.Context) error {
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		line, err := s.readLine(ctx)
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		v, res, err := processMcpMessage(ctx, []byte(line), s.server, s.protocol, "", nil)
		if err != nil {
			// errors during the processing of message will generate a valid MCP Error response.
			// server can continue to run.
			s.server.logger.ErrorContext(ctx, err.Error())
		}
		if v != "" {
			s.protocol = v
		}
		// no responses for notifications
		if res != nil {
			if err = s.write(ctx, res); err != nil {
				return err
			}
		}
	}
}

// readLine reads a single line from the input stream. The blocking read runs
// in a separate goroutine so that it can be abandoned if ctx is cancelled.
func (s *stdioSession) readLine(ctx context.Context) (string, error) {
	readChan := make(chan string, 1)
	errChan := make(chan error, 1)
	done := make(chan struct{})
	defer close(done)
	defer close(readChan)
	defer close(errChan)

	go func() {
		select {
		case <-done:
			return
		default:
			line, err := s.reader.ReadString('\n')
			if err != nil {
				select {
				case errChan <- err:
				case <-done:
				}
				return
			}
			select {
			case readChan <- line:
			case <-done:
			}
			return
		}
	}()

	select {
	// if context is cancelled, return an empty string
	case <-ctx.Done():
		return "", ctx.Err()
	// return error if error is found
	case err := <-errChan:
		return "", err
	// return line if successful
	case line := <-readChan:
		return line, nil
	}
}

// write writes the JSON-encoded response to the client via stdout
func (s *stdioSession) write(ctx context.Context, response any) error {
	res, err := json.Marshal(response)
	if err != nil {
		return err
	}

	_, err = fmt.Fprintf(s.writer, "%s\n", res)
	return err
}

// mcpRouter creates a router that represents the routes under /mcp
func mcpRouter(s *Server) (chi.Router, error) {
	r := chi.NewRouter()

	r.Use(middleware.AllowContentType("application/json", "application/json-rpc", "application/jsonrequest"))
	r.Use(middleware.StripSlashes)
	r.Use(render.SetContentType(render.ContentTypeJSON))

	r.Get("/sse", func(w http.ResponseWriter, r *http.Request) { sseHandler(s, w, r) })
	r.Get("/", func(w http.ResponseWriter, r *http.Request) { methodNotAllowed(s, w, r) })
	r.Post("/", func(w http.ResponseWriter, r *http.Request) { httpHandler(s, w, r) })
	r.Delete("/", func(w http.ResponseWriter, r *http.Request) {})

	r.Route("/{toolsetName}", func(r chi.Router) {
		r.Get("/sse", func(w http.ResponseWriter, r *http.Request) { sseHandler(s, w, r) })
		r.Get("/", func(w http.ResponseWriter, r *http.Request) { methodNotAllowed(s, w, r) })
		r.Post("/", func(w http.ResponseWriter, r *http.Request) { httpHandler(s, w, r) })
		r.Delete("/", func(w http.ResponseWriter, r *http.Request) {})
	})

	return r, nil
}

// sseHandler handles sse initialization and message.
func sseHandler(s *Server, w http.ResponseWriter, r *http.Request) {
	ctx, span := s.instrumentation.Tracer.Start(r.Context(), "toolbox/server/mcp/sse")
	r = r.WithContext(ctx)

	sessionId := uuid.New().String()
	toolsetName := chi.URLParam(r, "toolsetName")
	s.logger.DebugContext(ctx, fmt.Sprintf("toolset name: %s", toolsetName))
	span.SetAttributes(attribute.String("session_id", sessionId))
	span.SetAttributes(attribute.String("toolset_name", toolsetName))

	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("Access-Control-Allow-Origin", "*")

	var err error
	defer func() {
		if err != nil {
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()
		status := "success"
		if err != nil {
			status = "error"
		}
		s.instrumentation.McpSse.Add(
			r.Context(),
			1,
			metric.WithAttributes(attribute.String("toolbox.toolset.name", toolsetName)),
			metric.WithAttributes(attribute.String("toolbox.sse.sessionId", sessionId)),
			metric.WithAttributes(attribute.String("toolbox.operation.status", status)),
		)
	}()

	flusher, ok := w.(http.Flusher)
	if !ok {
		err = fmt.Errorf("unable to retrieve flusher for sse")
		s.logger.DebugContext(ctx, err.Error())
		_ = render.Render(w, r, newErrResponse(err, http.StatusInternalServerError))
		return
	}
	session := &sseSession{
		writer:     w,
		flusher:    flusher,
		done:       make(chan struct{}),
		eventQueue: make(chan string, 100),
	}
	s.sseManager.add(sessionId, session)
	defer s.sseManager.remove(sessionId)

	// https scheme formatting if (forwarded) request is a TLS request
	proto := r.Header.Get("X-Forwarded-Proto")
	if proto == "" {
		if r.TLS == nil {
			proto = "http"
		} else {
			proto = "https"
		}
	}

	// send initial endpoint event
	toolsetURL := ""
	if toolsetName != "" {
		toolsetURL = fmt.Sprintf("/%s", toolsetName)
	}
	messageEndpoint := fmt.Sprintf("%s://%s/mcp%s?sessionId=%s", proto, r.Host, toolsetURL, sessionId)
	s.logger.DebugContext(ctx, fmt.Sprintf("sending endpoint event: %s", messageEndpoint))
	fmt.Fprintf(w, "event: endpoint\ndata: %s\n\n", messageEndpoint)
	flusher.Flush()

	clientClose := r.Context().Done()
	for {
		select {
		// Ensure that only a single response is written at a time
		case event := <-session.eventQueue:
			fmt.Fprint(w, event)
			s.logger.DebugContext(ctx, fmt.Sprintf("sending event: %s", event))
			flusher.Flush()
			// channel for client disconnection
		case <-clientClose:
			close(session.done)
			s.logger.DebugContext(ctx, "client disconnected")
			return
		}
	}
}

// methodNotAllowed rejects requests that would require server-initiated
// streaming, which Toolbox does not support.
func methodNotAllowed(s *Server, w http.ResponseWriter, r *http.Request) {
	err := fmt.Errorf("toolbox does not support streaming in streamable HTTP transport")
	s.logger.DebugContext(r.Context(), err.Error())
	_ = render.Render(w, r, newErrResponse(err, http.StatusMethodNotAllowed))
}

// httpHandler handles all mcp messages.
func httpHandler(s *Server, w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	ctx, span := s.instrumentation.Tracer.Start(r.Context(), "toolbox/server/mcp")
	r = r.WithContext(ctx)
	ctx = util.WithLogger(r.Context(), s.logger)

	var sessionId, protocolVersion string
	var session *sseSession

	// check if the client connected via sse
	// v2024-11-05 supports http with sse
	paramSessionId := r.URL.Query().Get("sessionId")
	if paramSessionId != "" {
		sessionId = paramSessionId
		protocolVersion = v20241105.PROTOCOL_VERSION
		var ok bool
		session, ok = s.sseManager.get(sessionId)
		if !ok {
			s.logger.DebugContext(ctx, "sse session not available")
		}
	}

	// check if the client has the `Mcp-Session-Id` header
	// `Mcp-Session-Id` is only set for v2025-03-26 in Toolbox
	headerSessionId := r.Header.Get("Mcp-Session-Id")
	if headerSessionId != "" {
		protocolVersion = v20250326.PROTOCOL_VERSION
	}

	// check if the client has the `MCP-Protocol-Version` header
	// Only supported for v2025-06-18+.
	headerProtocolVersion := r.Header.Get("MCP-Protocol-Version")
	if headerProtocolVersion != "" {
		if !mcp.VerifyProtocolVersion(headerProtocolVersion) {
			err := fmt.Errorf("invalid protocol version: %s", headerProtocolVersion)
			_ = render.Render(w, r, newErrResponse(err, http.StatusBadRequest))
			return
		}
		protocolVersion = headerProtocolVersion
	}

	toolsetName := chi.URLParam(r, "toolsetName")
	s.logger.DebugContext(ctx, fmt.Sprintf("toolset name: %s", toolsetName))
	span.SetAttributes(attribute.String("toolset_name", toolsetName))

	var err error
	defer func() {
		if err != nil {
			span.SetStatus(codes.Error, err.Error())
		}
		span.End()

		status := "success"
		if err != nil {
			status = "error"
		}
		s.instrumentation.McpPost.Add(
			r.Context(),
			1,
			metric.WithAttributes(attribute.String("toolbox.sse.sessionId", sessionId)),
			metric.WithAttributes(attribute.String("toolbox.operation.status", status)),
		)
	}()

	// Read the request body
	body, err := io.ReadAll(r.Body)
	if err != nil {
		// Generate a new uuid if unable to decode
		id := uuid.New().String()
		s.logger.DebugContext(ctx, err.Error())
		render.JSON(w, r, jsonrpc.NewError(id, jsonrpc.PARSE_ERROR, err.Error(), nil))
		return
	}

	v, res, err := processMcpMessage(ctx, body, s, protocolVersion, toolsetName, r.Header)
	if err != nil {
		s.logger.DebugContext(ctx, fmt.Errorf("error processing message: %w", err).Error())
	}

	// notifications will return a nil response
	if res == nil {
		// Notifications do not expect a response
		// Toolbox doesn't do anything with notifications yet
		w.WriteHeader(http.StatusAccepted)
		return
	}

	// for v20250326, add the `Mcp-Session-Id` header
	if v == v20250326.PROTOCOL_VERSION {
		sessionId = uuid.New().String()
		w.Header().Set("Mcp-Session-Id", sessionId)
	}

	if session != nil {
		// queue sse event
		eventData, _ := json.Marshal(res)
		select {
		case session.eventQueue <- fmt.Sprintf("event: message\ndata: %s\n\n", eventData):
			s.logger.DebugContext(ctx, "event queue successful")
		case <-session.done:
			s.logger.DebugContext(ctx, "session is close")
		default:
			s.logger.DebugContext(ctx, "unable to add to event queue")
		}
	}
	if rpcResponse, ok := res.(jsonrpc.JSONRPCError); ok {
		code := rpcResponse.Error.Code
		switch code {
		case jsonrpc.INTERNAL_ERROR:
			w.WriteHeader(http.StatusInternalServerError)
		case jsonrpc.INVALID_REQUEST:
			errStr := err.Error()
			if errors.Is(err, tools.ErrUnauthorized) {
				w.WriteHeader(http.StatusUnauthorized)
			} else if strings.Contains(errStr, "Error 401") {
				w.WriteHeader(http.StatusUnauthorized)
			} else if strings.Contains(errStr, "Error 403") {
				w.WriteHeader(http.StatusForbidden)
			}
		}
	}

	// send HTTP response
	render.JSON(w, r, res)
}

// processMcpMessage processes the messages received from clients
func processMcpMessage(ctx context.Context, body []byte, s *Server, protocolVersion string, toolsetName string, header http.Header) (string, any, error) {
	logger, err := util.LoggerFromContext(ctx)
	if err != nil {
		return "", jsonrpc.NewError("", jsonrpc.INTERNAL_ERROR, err.Error(), nil), err
	}

	// Generic baseMessage could either be a JSONRPCNotification or JSONRPCRequest
	var baseMessage jsonrpc.BaseMessage
	if err = util.DecodeJSON(bytes.NewBuffer(body), &baseMessage); err != nil {
		// Generate a new uuid if unable to decode
		id := uuid.New().String()

		// check if user is sending a batch request
		var a []any
		unmarshalErr := json.Unmarshal(body, &a)
		if unmarshalErr == nil {
			err = fmt.Errorf("not supporting batch requests")
			return "", jsonrpc.NewError(id, jsonrpc.INVALID_REQUEST, err.Error(), nil), err
		}

		return "", jsonrpc.NewError(id, jsonrpc.PARSE_ERROR, err.Error(), nil), err
	}

	// Check if method is present
	if baseMessage.Method == "" {
		err = fmt.Errorf("method not found")
		return "", jsonrpc.NewError(baseMessage.Id, jsonrpc.METHOD_NOT_FOUND, err.Error(), nil), err
	}
	logger.DebugContext(ctx, fmt.Sprintf("method is: %s", baseMessage.Method))

	// Check for JSON-RPC 2.0
	if baseMessage.Jsonrpc != jsonrpc.JSONRPC_VERSION {
		err = fmt.Errorf("invalid json-rpc version")
		return "", jsonrpc.NewError(baseMessage.Id, jsonrpc.INVALID_REQUEST, err.Error(), nil), err
	}

	// Check if message is a notification
	if baseMessage.Id == nil {
		err := mcp.NotificationHandler(ctx, body)
		return "", nil, err
	}

	switch baseMessage.Method {
	case mcputil.INITIALIZE:
		res, v, err := mcp.InitializeResponse(ctx, baseMessage.Id, body, s.version)
		if err != nil {
			return "", res, err
		}
		return v, res, err
	default:
		toolset, ok := s.ResourceMgr.GetToolset(toolsetName)
		if !ok {
			err = fmt.Errorf("toolset does not exist")
			return "", jsonrpc.NewError(baseMessage.Id, jsonrpc.INVALID_REQUEST, err.Error(), nil), err
		}
		res, err := mcp.ProcessMethod(ctx, protocolVersion, baseMessage.Id, baseMessage.Method, toolset, s.ResourceMgr.GetToolsMap(), s.ResourceMgr.GetAuthServiceMap(), body, header)
		return "", res, err
	}
}

```

--------------------------------------------------------------------------------
/internal/tools/firestore/firestorequerycollection/firestorequerycollection.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestorequerycollection

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	firestoreapi "cloud.google.com/go/firestore"
	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	firestoreds "github.com/googleapis/genai-toolbox/internal/sources/firestore"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/firestore/util"
)

// Constants for tool configuration
const (
	kind            = "firestore-query-collection"
	defaultLimit    = 100
	defaultAnalyze  = false
	maxFilterLength = 100 // Maximum number of filters, to prevent abuse
)

// Parameter keys
const (
	collectionPathKey = "collectionPath"
	filtersKey        = "filters"
	orderByKey        = "orderBy"
	limitKey          = "limit"
	analyzeQueryKey   = "analyzeQuery"
)

// Firestore operators
var validOperators = map[string]bool{
	"<":                  true,
	"<=":                 true,
	">":                  true,
	">=":                 true,
	"==":                 true,
	"!=":                 true,
	"array-contains":     true,
	"array-contains-any": true,
	"in":                 true,
	"not-in":             true,
}

// Error messages
const (
	errMissingCollectionPath = "invalid or missing '%s' parameter"
	errInvalidFilters        = "invalid '%s' parameter; expected an array"
	errFilterNotString       = "filter at index %d is not a string"
	errFilterParseFailed     = "failed to parse filter at index %d: %w"
	errInvalidOperator       = "unsupported operator: %s. Valid operators are: %v"
	errMissingFilterValue    = "no value specified for filter on field '%s'"
	errOrderByParseFailed    = "failed to parse orderBy: %w"
	errQueryExecutionFailed  = "failed to execute query: %w"
	errTooManyFilters        = "too many filters provided: %d (maximum: %d)"
)

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

// compatibleSource defines the interface for sources that can provide a Firestore client
type compatibleSource interface {
	FirestoreClient() *firestoreapi.Client
}

// validate compatible sources are still compatible
var _ compatibleSource = &firestoreds.Source{}

var compatibleSources = [...]string{firestoreds.SourceKind}

// Config represents the configuration for the Firestore query collection tool
type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

// ToolConfigKind returns the kind of tool configuration
func (cfg Config) ToolConfigKind() string {
	return kind
}

// Initialize creates a new Tool instance from the configuration
func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	// Create parameters
	parameters := createParameters()

	mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, parameters)

	// finish tool setup
	t := Tool{
		Name:         cfg.Name,
		Kind:         kind,
		Parameters:   parameters,
		AuthRequired: cfg.AuthRequired,
		Client:       s.FirestoreClient(),
		manifest:     tools.Manifest{Description: cfg.Description, Parameters: parameters.Manifest(), AuthRequired: cfg.AuthRequired},
		mcpManifest:  mcpManifest,
	}
	return t, nil
}

// createParameters creates the parameter definitions for the tool
func createParameters() tools.Parameters {
	collectionPathParameter := tools.NewStringParameter(
		collectionPathKey,
		"The relative path to the Firestore collection to query (e.g., 'users' or 'users/userId/posts'). Note: This is a relative path, NOT an absolute path like 'projects/{project_id}/databases/{database_id}/documents/...'",
	)

	filtersDescription := `Array of filter objects to apply to the query. Each filter is a JSON string with:
- field: The field name to filter on
- op: The operator to use ("<", "<=", ">", ">=", "==", "!=", "array-contains", "array-contains-any", "in", "not-in")
- value: The value to compare against (can be string, number, boolean, or array)
Example: {"field": "age", "op": ">", "value": 18}`

	filtersParameter := tools.NewArrayParameter(
		filtersKey,
		filtersDescription,
		tools.NewStringParameter("item", "JSON string representation of a filter object"),
	)

	orderByParameter := tools.NewStringParameter(
		orderByKey,
		"JSON string specifying the field and direction to order by (e.g., {\"field\": \"name\", \"direction\": \"ASCENDING\"}). Leave empty if not specified",
	)

	limitParameter := tools.NewIntParameterWithDefault(
		limitKey,
		defaultLimit,
		"The maximum number of documents to return",
	)

	analyzeQueryParameter := tools.NewBooleanParameterWithDefault(
		analyzeQueryKey,
		defaultAnalyze,
		"If true, returns query explain metrics including execution statistics",
	)

	return tools.Parameters{
		collectionPathParameter,
		filtersParameter,
		orderByParameter,
		limitParameter,
		analyzeQueryParameter,
	}
}

// validate interface
var _ tools.Tool = Tool{}

// Tool represents the Firestore query collection tool
type Tool struct {
	Name         string           `yaml:"name"`
	Kind         string           `yaml:"kind"`
	AuthRequired []string         `yaml:"authRequired"`
	Parameters   tools.Parameters `yaml:"parameters"`

	Client      *firestoreapi.Client
	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

// FilterConfig represents a filter for the query
type FilterConfig struct {
	Field string      `json:"field"`
	Op    string      `json:"op"`
	Value interface{} `json:"value"`
}

// Validate checks if the filter configuration is valid
func (f *FilterConfig) Validate() error {
	if f.Field == "" {
		return fmt.Errorf("filter field cannot be empty")
	}

	if !validOperators[f.Op] {
		ops := make([]string, 0, len(validOperators))
		for op := range validOperators {
			ops = append(ops, op)
		}
		return fmt.Errorf(errInvalidOperator, f.Op, ops)
	}

	if f.Value == nil {
		return fmt.Errorf(errMissingFilterValue, f.Field)
	}

	return nil
}

// OrderByConfig represents ordering configuration
type OrderByConfig struct {
	Field     string `json:"field"`
	Direction string `json:"direction"`
}

// GetDirection returns the Firestore direction constant
func (o *OrderByConfig) GetDirection() firestoreapi.Direction {
	if strings.EqualFold(o.Direction, "DESCENDING") {
		return firestoreapi.Desc
	}
	return firestoreapi.Asc
}

// QueryResult represents a document result from the query
type QueryResult struct {
	ID         string         `json:"id"`
	Path       string         `json:"path"`
	Data       map[string]any `json:"data"`
	CreateTime interface{}    `json:"createTime,omitempty"`
	UpdateTime interface{}    `json:"updateTime,omitempty"`
	ReadTime   interface{}    `json:"readTime,omitempty"`
}

// QueryResponse represents the full response including optional metrics
type QueryResponse struct {
	Documents      []QueryResult  `json:"documents"`
	ExplainMetrics map[string]any `json:"explainMetrics,omitempty"`
}

// Invoke executes the Firestore query based on the provided parameters
func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	// Parse parameters
	queryParams, err := t.parseQueryParameters(params)
	if err != nil {
		return nil, err
	}

	// Build the query
	query, err := t.buildQuery(queryParams)
	if err != nil {
		return nil, err
	}

	// Execute the query and return results
	return t.executeQuery(ctx, query, queryParams.AnalyzeQuery)
}

// queryParameters holds all parsed query parameters
type queryParameters struct {
	CollectionPath string
	Filters        []FilterConfig
	OrderBy        *OrderByConfig
	Limit          int
	AnalyzeQuery   bool
}

// parseQueryParameters extracts and validates parameters from the input
func (t Tool) parseQueryParameters(params tools.ParamValues) (*queryParameters, error) {
	mapParams := params.AsMap()

	// Get collection path
	collectionPath, ok := mapParams[collectionPathKey].(string)
	if !ok || collectionPath == "" {
		return nil, fmt.Errorf(errMissingCollectionPath, collectionPathKey)
	}

	// Validate collection path
	if err := util.ValidateCollectionPath(collectionPath); err != nil {
		return nil, fmt.Errorf("invalid collection path: %w", err)
	}

	result := &queryParameters{
		CollectionPath: collectionPath,
		Limit:          defaultLimit,
		AnalyzeQuery:   defaultAnalyze,
	}

	// Parse filters
	if filtersRaw, ok := mapParams[filtersKey]; ok && filtersRaw != nil {
		filters, err := t.parseFilters(filtersRaw)
		if err != nil {
			return nil, err
		}
		result.Filters = filters
	}

	// Parse orderBy
	if orderByRaw, ok := mapParams[orderByKey]; ok && orderByRaw != nil {
		orderBy, err := t.parseOrderBy(orderByRaw)
		if err != nil {
			return nil, err
		}
		result.OrderBy = orderBy
	}

	// Parse limit
	if limit, ok := mapParams[limitKey].(int); ok {
		result.Limit = limit
	}

	// Parse analyze
	if analyze, ok := mapParams[analyzeQueryKey].(bool); ok {
		result.AnalyzeQuery = analyze
	}

	return result, nil
}

// parseFilters parses and validates filter configurations
func (t Tool) parseFilters(filtersRaw interface{}) ([]FilterConfig, error) {
	filters, ok := filtersRaw.([]any)
	if !ok {
		return nil, fmt.Errorf(errInvalidFilters, filtersKey)
	}

	if len(filters) > maxFilterLength {
		return nil, fmt.Errorf(errTooManyFilters, len(filters), maxFilterLength)
	}

	result := make([]FilterConfig, 0, len(filters))
	for i, filterRaw := range filters {
		filterJSON, ok := filterRaw.(string)
		if !ok {
			return nil, fmt.Errorf(errFilterNotString, i)
		}

		var filter FilterConfig
		if err := json.Unmarshal([]byte(filterJSON), &filter); err != nil {
			return nil, fmt.Errorf(errFilterParseFailed, i, err)
		}

		if err := filter.Validate(); err != nil {
			return nil, fmt.Errorf("filter at index %d is invalid: %w", i, err)
		}

		result = append(result, filter)
	}

	return result, nil
}

// parseOrderBy parses the orderBy configuration
func (t Tool) parseOrderBy(orderByRaw any) (*OrderByConfig, error) {
	orderByJSON, ok := orderByRaw.(string)
	if !ok || orderByJSON == "" {
		return nil, nil
	}

	var orderBy OrderByConfig
	if err := json.Unmarshal([]byte(orderByJSON), &orderBy); err != nil {
		return nil, fmt.Errorf(errOrderByParseFailed, err)
	}

	if orderBy.Field == "" {
		return nil, nil
	}

	return &orderBy, nil
}

// buildQuery constructs the Firestore query from parameters
func (t Tool) buildQuery(params *queryParameters) (*firestoreapi.Query, error) {
	collection := t.Client.Collection(params.CollectionPath)
	query := collection.Query

	// Apply filters
	if len(params.Filters) > 0 {
		filterConditions := make([]firestoreapi.EntityFilter, 0, len(params.Filters))
		for _, filter := range params.Filters {
			filterConditions = append(filterConditions, firestoreapi.PropertyFilter{
				Path:     filter.Field,
				Operator: filter.Op,
				Value:    filter.Value,
			})
		}

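		// All top-level filters are combined with AND semantics.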
		query = query.WhereEntity(firestoreapi.AndFilter{
			Filters: filterConditions,
		})
	}

	// Apply ordering
	if params.OrderBy != nil {
		query = query.OrderBy(params.OrderBy.Field, params.OrderBy.GetDirection())
	}

	// Apply limit
	query = query.Limit(params.Limit)

	// Apply analyze options
	if params.AnalyzeQuery {
		query = query.WithRunOptions(firestoreapi.ExplainOptions{
			Analyze: true,
		})
	}

	return &query, nil
}

// executeQuery runs the query and formats the results
func (t Tool) executeQuery(ctx context.Context, query *firestoreapi.Query, analyzeQuery bool) (any, error) {
	docIterator := query.Documents(ctx)
	docs, err := docIterator.GetAll()
	if err != nil {
		return nil, fmt.Errorf(errQueryExecutionFailed, err)
	}

	// Convert results to structured format
	results := make([]QueryResult, len(docs))
	for i, doc := range docs {
		results[i] = QueryResult{
			ID:         doc.Ref.ID,
			Path:       doc.Ref.Path,
			Data:       doc.Data(),
			CreateTime: doc.CreateTime,
			UpdateTime: doc.UpdateTime,
			ReadTime:   doc.ReadTime,
		}
	}

	// Return with explain metrics if requested; if extraction fails, fall back to documents only
	if analyzeQuery {
		explainMetrics, err := t.getExplainMetrics(docIterator)
		if err == nil && explainMetrics != nil {
			response := QueryResponse{
				Documents:      results,
				ExplainMetrics: explainMetrics,
			}
			return response, nil
		}
	}

	// Return just the documents
	resultsAny := make([]any, len(results))
	for i, r := range results {
		resultsAny[i] = r
	}
	return resultsAny, nil
}

// getExplainMetrics extracts explain metrics from the query iterator
func (t Tool) getExplainMetrics(docIterator *firestoreapi.DocumentIterator) (map[string]any, error) {
	explainMetrics, err := docIterator.ExplainMetrics()
	if err != nil || explainMetrics == nil {
		return nil, err
	}

	metricsData := make(map[string]any)

	// Add plan summary if available
	if explainMetrics.PlanSummary != nil {
		planSummary := make(map[string]any)
		planSummary["indexesUsed"] = explainMetrics.PlanSummary.IndexesUsed
		metricsData["planSummary"] = planSummary
	}

	// Add execution stats if available
	if explainMetrics.ExecutionStats != nil {
		executionStats := make(map[string]any)
		executionStats["resultsReturned"] = explainMetrics.ExecutionStats.ResultsReturned
		executionStats["readOperations"] = explainMetrics.ExecutionStats.ReadOperations

		if explainMetrics.ExecutionStats.ExecutionDuration != nil {
			executionStats["executionDuration"] = explainMetrics.ExecutionStats.ExecutionDuration.String()
		}

		if explainMetrics.ExecutionStats.DebugStats != nil {
			executionStats["debugStats"] = *explainMetrics.ExecutionStats.DebugStats
		}

		metricsData["executionStats"] = executionStats
	}

	return metricsData, nil
}

// ParseParams parses and validates input parameters
func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.Parameters, data, claims)
}

// Manifest returns the tool manifest
func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

// McpManifest returns the MCP manifest
func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

// Authorized checks if the tool is authorized based on verified auth services
func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

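// RequiresClientAuthorization reports whether the tool requires client-side authorization.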
func (t Tool) RequiresClientAuthorization() bool {
	return false
}

```
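
For orientation, here is a minimal, self-contained sketch of how a single entry in the filters parameter is decoded. The local `FilterConfig` below is a hypothetical stand-in that mirrors the `field`/`op`/`value` JSON shape `buildQuery` reads (`filter.Field`, `filter.Op`, `filter.Value`); it is not the repo's own definition.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FilterConfig is a hypothetical mirror of the tool's filter shape,
// matching the {"field": ..., "op": ..., "value": ...} JSON form.
type FilterConfig struct {
	Field string `json:"field"`
	Op    string `json:"op"`
	Value any    `json:"value"`
}

func main() {
	// One filter entry, as it would appear in the tool's filter array.
	raw := `{"field": "price", "op": "<=", "value": 99.5}`

	var f FilterConfig
	if err := json.Unmarshal([]byte(raw), &f); err != nil {
		panic(err)
	}
	// Prints: price <= 99.5
	fmt.Printf("%s %s %v\n", f.Field, f.Op, f.Value)
}
```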

--------------------------------------------------------------------------------
/internal/prebuiltconfigs/tools/cloud-sql-mysql-observability.yaml:
--------------------------------------------------------------------------------

```yaml
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sources:
  cloud-monitoring-source:
    kind: cloud-monitoring
tools:
  get_system_metrics:
    kind: cloud-monitoring-query-prometheus
    source: cloud-monitoring-source
    description: |
      Fetches system-level Cloud Monitoring data (timeseries metrics) for a MySQL instance using a PromQL query. Take the projectId and instanceId from the user for which the metrics timeseries data needs to be fetched.
      To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

      Generate the PromQL `query` for MySQL system metrics. Use the provided metrics and rules to construct queries, and get labels like `instance_id` from the user's intent.

      Defaults:
      1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

      PromQL Query Examples:
      1. Basic Time Series: `avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m])`
      2. Top K: `topk(30, avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      3. Mean: `avg(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      4. Minimum: `min(min_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      5. Maximum: `max(max_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      6. Sum: `sum(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      7. Count streams: `count(avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`
      8. Percentile with groupby on database_id: `quantile by ("database_id")(0.99,avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization","monitored_resource"="cloudsql_database","project_id"="my-projectId","database_id"="my-projectId:my-instanceId"}[5m]))`

      Available Metrics List (format: metric name. description. monitored resource. labels.). Note: `database_id` is actually the instance ID, in the format `project_id:instance_id`.
      1. `cloudsql.googleapis.com/database/cpu/utilization`: Current CPU utilization as a percentage of reserved CPU. `cloudsql_database`. `database`, `project_id`, `database_id`.
      2. `cloudsql.googleapis.com/database/network/connections`: Number of connections to the database instance. `cloudsql_database`. `database`, `project_id`, `database_id`.
      3. `cloudsql.googleapis.com/database/network/received_bytes_count`: Delta count of bytes received through the network. `cloudsql_database`. `database`, `project_id`, `database_id`.
      4. `cloudsql.googleapis.com/database/network/sent_bytes_count`: Delta count of bytes sent through the network. `cloudsql_database`. `destination`, `database`, `project_id`, `database_id`.
      5. `cloudsql.googleapis.com/database/memory/components`: Memory usage for components like usage, cache, and free memory. `cloudsql_database`. `component`, `database`, `project_id`, `database_id`.
      6. `cloudsql.googleapis.com/database/disk/bytes_used_by_data_type`: Data utilization in bytes. `cloudsql_database`. `data_type`, `database`, `project_id`, `database_id`.
      7. `cloudsql.googleapis.com/database/disk/read_ops_count`: Delta count of data disk read IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      8. `cloudsql.googleapis.com/database/disk/write_ops_count`: Delta count of data disk write IO operations. `cloudsql_database`. `database`, `project_id`, `database_id`.
      9. `cloudsql.googleapis.com/database/mysql/queries`: Delta count of statements executed by the server. `cloudsql_database`. `database`, `project_id`, `database_id`.
      10. `cloudsql.googleapis.com/database/mysql/questions`: Delta count of statements sent by the client. `cloudsql_database`. `database`, `project_id`, `database_id`.
      11. `cloudsql.googleapis.com/database/mysql/received_bytes_count`: Delta count of bytes received by MySQL process. `cloudsql_database`. `database`, `project_id`, `database_id`.
      12. `cloudsql.googleapis.com/database/mysql/sent_bytes_count`: Delta count of bytes sent by MySQL process. `cloudsql_database`. `database`, `project_id`, `database_id`.
      13. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_dirty`: Number of unflushed pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      14. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_free`: Number of unused pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      15. `cloudsql.googleapis.com/database/mysql/innodb_buffer_pool_pages_total`: Total number of pages in the InnoDB buffer pool. `cloudsql_database`. `database`, `project_id`, `database_id`.
      16. `cloudsql.googleapis.com/database/mysql/innodb_data_fsyncs`: Delta count of InnoDB fsync() calls. `cloudsql_database`. `database`, `project_id`, `database_id`.
      17. `cloudsql.googleapis.com/database/mysql/innodb_os_log_fsyncs`: Delta count of InnoDB fsync() calls to the log file. `cloudsql_database`. `database`, `project_id`, `database_id`.
      18. `cloudsql.googleapis.com/database/mysql/innodb_pages_read`: Delta count of InnoDB pages read. `cloudsql_database`. `database`, `project_id`, `database_id`.
      19. `cloudsql.googleapis.com/database/mysql/innodb_pages_written`: Delta count of InnoDB pages written. `cloudsql_database`. `database`, `project_id`, `database_id`.
      20. `cloudsql.googleapis.com/database/mysql/open_tables`: The number of tables that are currently open. `cloudsql_database`. `database`, `project_id`, `database_id`.
      21. `cloudsql.googleapis.com/database/mysql/opened_table_count`: The number of tables opened since the last sample. `cloudsql_database`. `database`, `project_id`, `database_id`.
      22. `cloudsql.googleapis.com/database/mysql/open_table_definitions`: The number of table definitions currently cached. `cloudsql_database`. `database`, `project_id`, `database_id`.
      23. `cloudsql.googleapis.com/database/mysql/opened_table_definitions_count`: The number of table definitions cached since the last sample. `cloudsql_database`. `database`, `project_id`, `database_id`.
      24. `cloudsql.googleapis.com/database/mysql/innodb/dictionary_memory`: Memory allocated for the InnoDB dictionary cache. `cloudsql_database`. `database`, `project_id`, `database_id`.

  get_query_metrics:
    kind: cloud-monitoring-query-prometheus
    source: cloud-monitoring-source
    description: |
      Fetches query-level Cloud Monitoring data (timeseries metrics) for queries running in a MySQL instance using a PromQL query. Take the projectId and instanceId from the user for which the metrics timeseries data needs to be fetched.
      To use this tool, you must provide the Google Cloud `projectId` and a PromQL `query`.

      Generate the PromQL `query` for MySQL query metrics. Use the provided metrics and rules to construct queries, and get labels like `instance_id` and `query_hash` from the user's intent. If a `query_hash` is provided, use the per-query metrics. Query hash and query ID are the same.

      Defaults:
      1. Interval: Use a default interval of `5m` for `_over_time` aggregation functions unless a different window is specified by the user.

      PromQL Query Examples:
      1. Basic Time Series: `avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m])`
      2. Top K: `topk(30, avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      3. Mean: `avg(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      4. Minimum: `min(min_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      5. Maximum: `max(max_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      6. Sum: `sum(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      7. Count streams: `count(avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`
      8. Percentile with groupby on resource_id, database: `quantile by ("resource_id","database")(0.99,avg_over_time({"__name__"="dbinsights.googleapis.com/aggregate/execution_time","monitored_resource"="cloudsql_instance_database","project_id"="my-projectId","resource_id"="my-projectId:my-instanceId"}[5m]))`

      Available Metrics List (format: metric name. description. monitored resource. labels.). The `resource_id` label has the format `project_id:instance_id` and identifies the instance. The `aggregate` metrics hold aggregated values across all query stats; use them when no query ID is provided. For per-query metrics, do not fetch `querystring` unless the user specifically asks for it; aggregate on `query_hash` to avoid fetching the querystring. Do not use the latency metrics for anything.
      1. `dbinsights.googleapis.com/aggregate/latencies`: Cumulative query latency distribution per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      2. `dbinsights.googleapis.com/aggregate/execution_time`: Cumulative query execution time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      3. `dbinsights.googleapis.com/aggregate/execution_count`: Total number of query executions per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      4. `dbinsights.googleapis.com/aggregate/lock_time`: Cumulative lock wait time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `lock_type`, `database`, `project_id`, `resource_id`.
      5. `dbinsights.googleapis.com/aggregate/io_time`: Cumulative IO wait time per user and database. `cloudsql_instance_database`. `user`, `client_addr`, `database`, `project_id`, `resource_id`.
      6. `dbinsights.googleapis.com/aggregate/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `user`, `client_addr`, `row_status`, `database`, `project_id`, `resource_id`.
      7. `dbinsights.googleapis.com/perquery/latencies`: Cumulative query latency distribution per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      8. `dbinsights.googleapis.com/perquery/execution_time`: Cumulative query execution time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      9. `dbinsights.googleapis.com/perquery/execution_count`: Total number of query executions per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      10. `dbinsights.googleapis.com/perquery/lock_time`: Cumulative lock wait time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `lock_type`, `query_hash`, `database`, `project_id`, `resource_id`.
      11. `dbinsights.googleapis.com/perquery/io_time`: Cumulative io wait time per user, database, and query. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `database`, `project_id`, `resource_id`.
      12. `dbinsights.googleapis.com/perquery/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `querystring`, `user`, `client_addr`, `query_hash`, `row_status`, `database`, `project_id`, `resource_id`.
      13. `dbinsights.googleapis.com/pertag/latencies`: Cumulative query latency distribution per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      14. `dbinsights.googleapis.com/pertag/execution_time`: Cumulative query execution time per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      15. `dbinsights.googleapis.com/pertag/execution_count`: Total number of query executions per user, database, and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      16. `dbinsights.googleapis.com/pertag/lock_time`: Cumulative lock wait time per user, database and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `lock_type`, `tag_hash`, `database`, `project_id`, `resource_id`.
      17. `dbinsights.googleapis.com/pertag/io_time`: Cumulative IO wait time per user, database and tag. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `database`, `project_id`, `resource_id`.
      18. `dbinsights.googleapis.com/pertag/row_count`: Total number of rows affected during query execution. `cloudsql_instance_database`. `user`, `client_addr`, `action`, `application`, `controller`, `db_driver`, `framework`, `route`, `tag_hash`, `row_status`, `database`, `project_id`, `resource_id`.

toolsets:
  cloud_sql_mysql_cloud_monitoring_tools:
    - get_system_metrics
    - get_query_metrics
```
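
As a side note, the tool descriptions above expect the caller to splice the project and instance into the PromQL selector, with `database_id` taking the `project_id:instance_id` form. A minimal sketch of that substitution in Go (identifiers are hypothetical):

```go
package main

import "fmt"

func main() {
	// Hypothetical identifiers for illustration.
	projectID, instanceID := "my-project", "my-instance"
	databaseID := fmt.Sprintf("%s:%s", projectID, instanceID)

	// CPU-utilization selector in the same shape as the examples above.
	query := fmt.Sprintf(
		`avg_over_time({"__name__"="cloudsql.googleapis.com/database/cpu/utilization",`+
			`"monitored_resource"="cloudsql_database","project_id"=%q,"database_id"=%q}[5m])`,
		projectID, databaseID)
	fmt.Println(query)
}
```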

--------------------------------------------------------------------------------
/internal/tools/firestore/firestorequery/firestorequery_test.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestorequery_test

import (
	"testing"

	yaml "github.com/goccy/go-yaml"
	"github.com/google/go-cmp/cmp"
	"github.com/googleapis/genai-toolbox/internal/server"
	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/internal/tools"
	"github.com/googleapis/genai-toolbox/internal/tools/firestore/firestorequery"
)

func TestParseFromYamlFirestoreQuery(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	tcs := []struct {
		desc string
		in   string
		want server.ToolConfigs
	}{
		{
			desc: "basic example with parameterized collection path",
			in: `
			tools:
				query_users_tool:
					kind: firestore-query
					source: my-firestore-instance
					description: Query users collection with parameterized path
					collectionPath: "users/{{.userId}}/documents"
					parameters:
						- name: userId
						  type: string
						  description: The user ID to query documents for
						  required: true
			`,
			want: server.ToolConfigs{
				"query_users_tool": firestorequery.Config{
					Name:           "query_users_tool",
					Kind:           "firestore-query",
					Source:         "my-firestore-instance",
					Description:    "Query users collection with parameterized path",
					CollectionPath: "users/{{.userId}}/documents",
					AuthRequired:   []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("userId", "The user ID to query documents for", true),
					},
				},
			},
		},
		{
			desc: "with parameterized filters",
			in: `
			tools:
				query_products_tool:
					kind: firestore-query
					source: prod-firestore
					description: Query products with dynamic filters
					collectionPath: "products"
					filters: |
						{
							"and": [
								{"field": "category", "op": "==", "value": {"stringValue": "{{.category}}"}},
								{"field": "price", "op": "<=", "value": {"doubleValue": {{.maxPrice}}}}
							]
						}
					parameters:
						- name: category
						  type: string
						  description: Product category to filter by
						  required: true
						- name: maxPrice
						  type: float
						  description: Maximum price for products
						  required: true
			`,
			want: server.ToolConfigs{
				"query_products_tool": firestorequery.Config{
					Name:           "query_products_tool",
					Kind:           "firestore-query",
					Source:         "prod-firestore",
					Description:    "Query products with dynamic filters",
					CollectionPath: "products",
					Filters: `{
  "and": [
    {"field": "category", "op": "==", "value": {"stringValue": "{{.category}}"}},
    {"field": "price", "op": "<=", "value": {"doubleValue": {{.maxPrice}}}}
  ]
}
`,
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("category", "Product category to filter by", true),
						tools.NewFloatParameterWithRequired("maxPrice", "Maximum price for products", true),
					},
				},
			},
		},
		{
			desc: "with select fields and orderBy",
			in: `
			tools:
				query_orders_tool:
					kind: firestore-query
					source: orders-firestore
					description: Query orders with field selection
					collectionPath: "orders"
					select:
						- orderId
						- customerName
						- totalAmount
					orderBy:
						field: "{{.sortField}}"
						direction: "DESCENDING"
					limit: 50
					parameters:
						- name: sortField
						  type: string
						  description: Field to sort by
						  required: true
			`,
			want: server.ToolConfigs{
				"query_orders_tool": firestorequery.Config{
					Name:           "query_orders_tool",
					Kind:           "firestore-query",
					Source:         "orders-firestore",
					Description:    "Query orders with field selection",
					CollectionPath: "orders",
					Select:         []string{"orderId", "customerName", "totalAmount"},
					OrderBy: map[string]any{
						"field":     "{{.sortField}}",
						"direction": "DESCENDING",
					},
					Limit:        "50",
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("sortField", "Field to sort by", true),
					},
				},
			},
		},
		{
			desc: "with auth requirements and complex filters",
			in: `
			tools:
				secure_query_tool:
					kind: firestore-query
					source: secure-firestore
					description: Query with authentication and complex filters
					collectionPath: "{{.collection}}"
					filters: |
						{
							"or": [
								{
									"and": [
										{"field": "status", "op": "==", "value": {"stringValue": "{{.status}}"}},
										{"field": "priority", "op": ">=", "value": {"integerValue": "{{.minPriority}}"}}
									]
								},
								{"field": "urgent", "op": "==", "value": {"booleanValue": true}}
							]
						}
					analyzeQuery: true
					authRequired:
						- google-auth-service
						- api-key-service
					parameters:
						- name: collection
						  type: string
						  description: Collection name to query
						  required: true
						- name: status
						  type: string
						  description: Status to filter by
						  required: true
						- name: minPriority
						  type: integer
						  description: Minimum priority level
						  default: 1
			`,
			want: server.ToolConfigs{
				"secure_query_tool": firestorequery.Config{
					Name:           "secure_query_tool",
					Kind:           "firestore-query",
					Source:         "secure-firestore",
					Description:    "Query with authentication and complex filters",
					CollectionPath: "{{.collection}}",
					Filters: `{
  "or": [
    {
      "and": [
        {"field": "status", "op": "==", "value": {"stringValue": "{{.status}}"}},
        {"field": "priority", "op": ">=", "value": {"integerValue": "{{.minPriority}}"}}
      ]
    },
    {"field": "urgent", "op": "==", "value": {"booleanValue": true}}
  ]
}
`,
					AnalyzeQuery: true,
					AuthRequired: []string{"google-auth-service", "api-key-service"},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("collection", "Collection name to query", true),
						tools.NewStringParameterWithRequired("status", "Status to filter by", true),
						tools.NewIntParameterWithDefault("minPriority", 1, "Minimum priority level"),
					},
				},
			},
		},
		{
			desc: "with Firestore native JSON value types and template parameters",
			in: `
			tools:
				query_with_typed_values:
					kind: firestore-query
					source: typed-firestore
					description: Query with Firestore native JSON value types
					collectionPath: "countries"
					filters: |
						{
							"or": [
								{"field": "continent", "op": "==", "value": {"stringValue": "{{.continent}}"}},
								{
									"and": [
										{"field": "area", "op": ">", "value": {"integerValue": "2000000"}},
										{"field": "area", "op": "<", "value": {"integerValue": "3000000"}},
										{"field": "population", "op": ">=", "value": {"integerValue": "{{.minPopulation}}"}},
										{"field": "gdp", "op": ">", "value": {"doubleValue": {{.minGdp}}}},
										{"field": "isActive", "op": "==", "value": {"booleanValue": {{.isActive}}}},
										{"field": "lastUpdated", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
									]
								}
							]
						}
					parameters:
						- name: continent
						  type: string
						  description: Continent to filter by
						  required: true
						- name: minPopulation
						  type: string
						  description: Minimum population as string
						  required: true
						- name: minGdp
						  type: float
						  description: Minimum GDP value
						  required: true
						- name: isActive
						  type: boolean
						  description: Filter by active status
						  required: true
						- name: startDate
						  type: string
						  description: Start date in RFC3339 format
						  required: true
			`,
			want: server.ToolConfigs{
				"query_with_typed_values": firestorequery.Config{
					Name:           "query_with_typed_values",
					Kind:           "firestore-query",
					Source:         "typed-firestore",
					Description:    "Query with Firestore native JSON value types",
					CollectionPath: "countries",
					Filters: `{
  "or": [
    {"field": "continent", "op": "==", "value": {"stringValue": "{{.continent}}"}},
    {
      "and": [
        {"field": "area", "op": ">", "value": {"integerValue": "2000000"}},
        {"field": "area", "op": "<", "value": {"integerValue": "3000000"}},
        {"field": "population", "op": ">=", "value": {"integerValue": "{{.minPopulation}}"}},
        {"field": "gdp", "op": ">", "value": {"doubleValue": {{.minGdp}}}},
        {"field": "isActive", "op": "==", "value": {"booleanValue": {{.isActive}}}},
        {"field": "lastUpdated", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
      ]
    }
  ]
}
`,
					AuthRequired: []string{},
					Parameters: tools.Parameters{
						tools.NewStringParameterWithRequired("continent", "Continent to filter by", true),
						tools.NewStringParameterWithRequired("minPopulation", "Minimum population as string", true),
						tools.NewFloatParameterWithRequired("minGdp", "Minimum GDP value", true),
						tools.NewBooleanParameterWithRequired("isActive", "Filter by active status", true),
						tools.NewStringParameterWithRequired("startDate", "Start date in RFC3339 format", true),
					},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.desc, func(t *testing.T) {
			got := struct {
				Tools server.ToolConfigs `yaml:"tools"`
			}{}
			// Parse contents
			err := yaml.UnmarshalContext(ctx, testutils.FormatYaml(tc.in), &got)
			if err != nil {
				t.Fatalf("unable to unmarshal: %s", err)
			}
			if diff := cmp.Diff(tc.want, got.Tools); diff != "" {
				t.Fatalf("incorrect parse: diff %v", diff)
			}
		})
	}
}

func TestParseFromYamlMultipleQueryTools(t *testing.T) {
	ctx, err := testutils.ContextWithNewLogger()
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	in := `
	tools:
		query_user_posts:
			kind: firestore-query
			source: social-firestore
			description: Query user posts with filtering
			collectionPath: "users/{{.userId}}/posts"
			filters: |
				{
					"and": [
						{"field": "visibility", "op": "==", "value": {"stringValue": "{{.visibility}}"}},
						{"field": "createdAt", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
					]
				}
			select:
				- title
				- content
				- likes
			orderBy:
				field: createdAt
				direction: "{{.sortOrder}}"
			limit: 20
			parameters:
				- name: userId
				  type: string
				  description: User ID whose posts to query
				  required: true
				- name: visibility
				  type: string
				  description: Post visibility (public, private, friends)
				  required: true
				- name: startDate
				  type: string
				  description: Start date for posts
				  required: true
				- name: sortOrder
				  type: string
				  description: Sort order (ASCENDING or DESCENDING)
				  default: "DESCENDING"
		query_inventory:
			kind: firestore-query
			source: inventory-firestore
			description: Query inventory items
			collectionPath: "warehouses/{{.warehouseId}}/inventory"
			filters: |
				{
					"field": "quantity", "op": "<", "value": {"integerValue": "{{.threshold}}"}}
			parameters:
				- name: warehouseId
				  type: string
				  description: Warehouse ID to check inventory
				  required: true
				- name: threshold
				  type: integer
				  description: Quantity threshold for low stock
				  required: true
		query_transactions:
			kind: firestore-query
			source: finance-firestore
			description: Query financial transactions
			collectionPath: "accounts/{{.accountId}}/transactions"
			filters: |
				{
					"or": [
						{"field": "type", "op": "==", "value": {"stringValue": "{{.transactionType}}"}},
						{"field": "amount", "op": ">", "value": {"doubleValue": {{.minAmount}}}}
					]
				}
			analyzeQuery: true
			authRequired:
				- finance-auth
			parameters:
				- name: accountId
				  type: string
				  description: Account ID for transactions
				  required: true
				- name: transactionType
				  type: string
				  description: Type of transaction
				  default: "all"
				- name: minAmount
				  type: float
				  description: Minimum transaction amount
				  default: 0
	`
	want := server.ToolConfigs{
		"query_user_posts": firestorequery.Config{
			Name:           "query_user_posts",
			Kind:           "firestore-query",
			Source:         "social-firestore",
			Description:    "Query user posts with filtering",
			CollectionPath: "users/{{.userId}}/posts",
			Filters: `{
  "and": [
    {"field": "visibility", "op": "==", "value": {"stringValue": "{{.visibility}}"}},
    {"field": "createdAt", "op": ">=", "value": {"timestampValue": "{{.startDate}}"}}
  ]
}
`,
			Select: []string{"title", "content", "likes"},
			OrderBy: map[string]any{
				"field":     "createdAt",
				"direction": "{{.sortOrder}}",
			},
			Limit:        "20",
			AuthRequired: []string{},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("userId", "User ID whose posts to query", true),
				tools.NewStringParameterWithRequired("visibility", "Post visibility (public, private, friends)", true),
				tools.NewStringParameterWithRequired("startDate", "Start date for posts", true),
				tools.NewStringParameterWithDefault("sortOrder", "DESCENDING", "Sort order (ASCENDING or DESCENDING)"),
			},
		},
		"query_inventory": firestorequery.Config{
			Name:           "query_inventory",
			Kind:           "firestore-query",
			Source:         "inventory-firestore",
			Description:    "Query inventory items",
			CollectionPath: "warehouses/{{.warehouseId}}/inventory",
			Filters: `{
  "field": "quantity", "op": "<", "value": {"integerValue": "{{.threshold}}"}}
`,
			AuthRequired: []string{},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("warehouseId", "Warehouse ID to check inventory", true),
				tools.NewIntParameterWithRequired("threshold", "Quantity threshold for low stock", true),
			},
		},
		"query_transactions": firestorequery.Config{
			Name:           "query_transactions",
			Kind:           "firestore-query",
			Source:         "finance-firestore",
			Description:    "Query financial transactions",
			CollectionPath: "accounts/{{.accountId}}/transactions",
			Filters: `{
  "or": [
    {"field": "type", "op": "==", "value": {"stringValue": "{{.transactionType}}"}},
    {"field": "amount", "op": ">", "value": {"doubleValue": {{.minAmount}}}}
  ]
}
`,
			AnalyzeQuery: true,
			AuthRequired: []string{"finance-auth"},
			Parameters: tools.Parameters{
				tools.NewStringParameterWithRequired("accountId", "Account ID for transactions", true),
				tools.NewStringParameterWithDefault("transactionType", "all", "Type of transaction"),
				tools.NewFloatParameterWithDefault("minAmount", 0, "Minimum transaction amount"),
			},
		},
	}

	got := struct {
		Tools server.ToolConfigs `yaml:"tools"`
	}{}
	// Parse contents
	err = yaml.UnmarshalContext(ctx, testutils.FormatYaml(in), &got)
	if err != nil {
		t.Fatalf("unable to unmarshal: %s", err)
	}
	if diff := cmp.Diff(want, got.Tools); diff != "" {
		t.Fatalf("incorrect parse: diff %v", diff)
	}
}

```
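
As an aside, these tests rely on `cmp.Diff` returning an empty string when the parsed config matches the expected one. A minimal, self-contained sketch of that pattern (the struct is hypothetical, not from the repo):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type config struct {
	Name  string
	Limit int
}

func main() {
	want := config{Name: "query_users_tool", Limit: 50}
	got := config{Name: "query_users_tool", Limit: 20}
	// cmp.Diff returns "" when the values are equal; otherwise it
	// returns a human-readable report of what differs (here, Limit).
	if diff := cmp.Diff(want, got); diff != "" {
		fmt.Printf("incorrect parse: diff %v", diff)
	}
}
```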

--------------------------------------------------------------------------------
/tests/neo4j/neo4j_integration_test.go:
--------------------------------------------------------------------------------

```go
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package neo4j

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"os"
	"reflect"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"

	"github.com/googleapis/genai-toolbox/internal/testutils"
	"github.com/googleapis/genai-toolbox/tests"
)

var (
	Neo4jSourceKind = "neo4j"
	Neo4jDatabase   = os.Getenv("NEO4J_DATABASE")
	Neo4jUri        = os.Getenv("NEO4J_URI")
	Neo4jUser       = os.Getenv("NEO4J_USER")
	Neo4jPass       = os.Getenv("NEO4J_PASS")
)

// getNeo4jVars retrieves necessary Neo4j connection details from environment variables.
// It fails the test if any required variable is not set.
func getNeo4jVars(t *testing.T) map[string]any {
	switch "" {
	case Neo4jDatabase:
		t.Fatal("'NEO4J_DATABASE' not set")
	case Neo4jUri:
		t.Fatal("'NEO4J_URI' not set")
	case Neo4jUser:
		t.Fatal("'NEO4J_USER' not set")
	case Neo4jPass:
		t.Fatal("'NEO4J_PASS' not set")
	}

	return map[string]any{
		"kind":     Neo4jSourceKind,
		"uri":      Neo4jUri,
		"database": Neo4jDatabase,
		"user":     Neo4jUser,
		"password": Neo4jPass,
	}
}

// TestNeo4jToolEndpoints sets up an integration test server and tests the API endpoints
// for various Neo4j tools, including cypher execution and schema retrieval.
func TestNeo4jToolEndpoints(t *testing.T) {
	sourceConfig := getNeo4jVars(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	var args []string

	// Write config into a file and pass it to the command.
	// This configuration defines the data source and the tools to be tested.
	toolsFile := map[string]any{
		"sources": map[string]any{
			"my-neo4j-instance": sourceConfig,
		},
		"tools": map[string]any{
			"my-simple-cypher-tool": map[string]any{
				"kind":        "neo4j-cypher",
				"source":      "my-neo4j-instance",
				"description": "Simple tool to test end to end functionality.",
				"statement":   "RETURN 1 as a;",
			},
			"my-simple-execute-cypher-tool": map[string]any{
				"kind":        "neo4j-execute-cypher",
				"source":      "my-neo4j-instance",
				"description": "Simple tool to test end to end functionality.",
			},
			"my-readonly-execute-cypher-tool": map[string]any{
				"kind":        "neo4j-execute-cypher",
				"source":      "my-neo4j-instance",
				"description": "A readonly cypher execution tool.",
				"readOnly":    true,
			},
			"my-schema-tool": map[string]any{
				"kind":        "neo4j-schema",
				"source":      "my-neo4j-instance",
				"description": "A tool to get the Neo4j schema.",
			},
			"my-schema-tool-with-cache": map[string]any{
				"kind":               "neo4j-schema",
				"source":             "my-neo4j-instance",
				"description":        "A schema tool with a custom cache expiration.",
				"cacheExpireMinutes": 10,
			},
			"my-populated-schema-tool": map[string]any{
				"kind":        "neo4j-schema",
				"source":      "my-neo4j-instance",
				"description": "A tool to get the Neo4j schema from a populated DB.",
			},
		},
	}
	cmd, cleanup, err := tests.StartCmd(ctx, toolsFile, args...)
	if err != nil {
		t.Fatalf("command initialization returned an error: %s", err)
	}
	defer cleanup()

	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	out, err := testutils.WaitForString(waitCtx, regexp.MustCompile(`Server ready to serve`), cmd.Out)
	if err != nil {
		t.Logf("toolbox command logs: \n%s", out)
		t.Fatalf("toolbox didn't start successfully: %s", err)
	}

	// Test tool `GET` endpoints to verify their manifests are correct.
	tcs := []struct {
		name string
		api  string
		want map[string]any
	}{
		{
			name: "get my-simple-cypher-tool",
			api:  "http://127.0.0.1:5000/api/tool/my-simple-cypher-tool/",
			want: map[string]any{
				"my-simple-cypher-tool": map[string]any{
					"description":  "Simple tool to test end to end functionality.",
					"parameters":   []any{},
					"authRequired": []any{},
				},
			},
		},
		{
			name: "get my-simple-execute-cypher-tool",
			api:  "http://127.0.0.1:5000/api/tool/my-simple-execute-cypher-tool/",
			want: map[string]any{
				"my-simple-execute-cypher-tool": map[string]any{
					"description": "Simple tool to test end to end functionality.",
					"parameters": []any{
						map[string]any{
							"name":        "cypher",
							"type":        "string",
							"required":    true,
							"description": "The cypher to execute.",
							"authSources": []any{},
						},
					},
					"authRequired": []any{},
				},
			},
		},
		{
			name: "get my-schema-tool",
			api:  "http://127.0.0.1:5000/api/tool/my-schema-tool/",
			want: map[string]any{
				"my-schema-tool": map[string]any{
					"description":  "A tool to get the Neo4j schema.",
					"parameters":   []any{},
					"authRequired": []any{},
				},
			},
		},
		{
			name: "get my-schema-tool-with-cache",
			api:  "http://127.0.0.1:5000/api/tool/my-schema-tool-with-cache/",
			want: map[string]any{
				"my-schema-tool-with-cache": map[string]any{
					"description":  "A schema tool with a custom cache expiration.",
					"parameters":   []any{},
					"authRequired": []any{},
				},
			},
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			resp, err := http.Get(tc.api)
			if err != nil {
				t.Fatalf("error when sending a request: %s", err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				t.Fatalf("response status code is not 200")
			}

			var body map[string]any
			err = json.NewDecoder(resp.Body).Decode(&body)
			if err != nil {
				t.Fatalf("error parsing response body")
			}

			got, ok := body["tools"]
			if !ok {
				t.Fatalf("unable to find tools in response body")
			}
			if !reflect.DeepEqual(got, tc.want) {
				t.Fatalf("got %q, want %q", got, tc.want)
			}
		})
	}

	// Test tool `invoke` endpoints to verify their functionality.
	invokeTcs := []struct {
		name               string
		api                string
		requestBody        io.Reader
		want               string
		wantStatus         int
		wantErrorSubstring string
		prepareData        func(t *testing.T)
		validateFunc       func(t *testing.T, body string)
	}{
		{
			name:        "invoke my-simple-cypher-tool",
			api:         "http://127.0.0.1:5000/api/tool/my-simple-cypher-tool/invoke",
			requestBody: bytes.NewBuffer([]byte(`{}`)),
			want:        "[{\"a\":1}]",
			wantStatus:  http.StatusOK,
		},
		{
			name:        "invoke my-simple-execute-cypher-tool",
			api:         "http://127.0.0.1:5000/api/tool/my-simple-execute-cypher-tool/invoke",
			requestBody: bytes.NewBuffer([]byte(`{"cypher": "RETURN 1 as a;"}`)),
			want:        "[{\"a\":1}]",
			wantStatus:  http.StatusOK,
		},
		{
			name:               "invoke readonly tool with write query",
			api:                "http://127.0.0.1:5000/api/tool/my-readonly-execute-cypher-tool/invoke",
			requestBody:        bytes.NewBuffer([]byte(`{"cypher": "CREATE (n:TestNode)"}`)),
			wantStatus:         http.StatusBadRequest,
			wantErrorSubstring: "this tool is read-only and cannot execute write queries",
		},
		{
			name:        "invoke my-schema-tool",
			api:         "http://127.0.0.1:5000/api/tool/my-schema-tool/invoke",
			requestBody: bytes.NewBuffer([]byte(`{}`)),
			wantStatus:  http.StatusOK,
			validateFunc: func(t *testing.T, body string) {
				var result map[string]any
				if err := json.Unmarshal([]byte(body), &result); err != nil {
					t.Fatalf("failed to unmarshal schema result: %v", err)
				}
				// Check for the presence of top-level keys in the schema response.
				expectedKeys := []string{"nodeLabels", "relationships", "constraints", "indexes", "databaseInfo", "statistics"}
				for _, key := range expectedKeys {
					if _, ok := result[key]; !ok {
						t.Errorf("expected key %q not found in schema response", key)
					}
				}
			},
		},
		{
			name:        "invoke my-schema-tool-with-cache",
			api:         "http://127.0.0.1:5000/api/tool/my-schema-tool-with-cache/invoke",
			requestBody: bytes.NewBuffer([]byte(`{}`)),
			wantStatus:  http.StatusOK,
			validateFunc: func(t *testing.T, body string) {
				var result map[string]any
				if err := json.Unmarshal([]byte(body), &result); err != nil {
					t.Fatalf("failed to unmarshal schema result: %v", err)
				}
				// Also check the structure of the schema response for the cached tool.
				expectedKeys := []string{"nodeLabels", "relationships", "constraints", "indexes", "databaseInfo", "statistics"}
				for _, key := range expectedKeys {
					if _, ok := result[key]; !ok {
						t.Errorf("expected key %q not found in schema response", key)
					}
				}
			},
		},
		{
			name:        "invoke my-schema-tool with populated data",
			api:         "http://127.0.0.1:5000/api/tool/my-populated-schema-tool/invoke",
			requestBody: bytes.NewBuffer([]byte(`{}`)),
			wantStatus:  http.StatusOK,
			prepareData: func(t *testing.T) {
				ctx := context.Background()
				driver, err := neo4j.NewDriverWithContext(Neo4jUri, neo4j.BasicAuth(Neo4jUser, Neo4jPass, ""))
				if err != nil {
					t.Fatalf("failed to create neo4j driver: %v", err)
				}

				// Helper to execute queries for setup and teardown.
				execute := func(query string) {
					session := driver.NewSession(ctx, neo4j.SessionConfig{DatabaseName: Neo4jDatabase})
					defer session.Close(ctx)
					// Use ExecuteWrite to ensure the query is committed before proceeding.
					_, err := session.ExecuteWrite(ctx, func(tx neo4j.ManagedTransaction) (any, error) {
						_, err := tx.Run(ctx, query, nil)
						return nil, err
					})

					// Ignore errors from DROP statements during teardown (e.g., the entity may not exist).
					if err != nil && !strings.Contains(query, "DROP") {
						t.Fatalf("query failed: %s\nerror: %v", query, err)
					}
				}

				// Teardown is registered with t.Cleanup so it runs even if the test
				// fails; the driver is closed as part of that cleanup.
				t.Cleanup(func() {
					execute("DROP CONSTRAINT PersonNameUnique IF EXISTS")
					execute("DROP INDEX MovieTitleIndex IF EXISTS")
					execute("MATCH (n) DETACH DELETE n")
					if err := driver.Close(ctx); err != nil {
						t.Errorf("failed to close driver during cleanup: %v", err)
					}
				})

				// Setup: Create constraints, indexes, and data.
				execute("MERGE (p:Person {name: 'Alice'}) MERGE (m:Movie {title: 'The Matrix'}) MERGE (p)-[:ACTED_IN]->(m)")
				execute("CREATE CONSTRAINT PersonNameUnique IF NOT EXISTS FOR (p:Person) REQUIRE p.name IS UNIQUE")
				execute("CREATE INDEX MovieTitleIndex IF NOT EXISTS FOR (m:Movie) ON (m.title)")
			},
			validateFunc: func(t *testing.T, body string) {
				// Define structs for unmarshaling the detailed schema.
				type Property struct {
					Name  string   `json:"name"`
					Types []string `json:"types"`
				}
				type NodeLabel struct {
					Name       string     `json:"name"`
					Properties []Property `json:"properties"`
				}
				type Relationship struct {
					Type      string `json:"type"`
					StartNode string `json:"startNode"`
					EndNode   string `json:"endNode"`
				}
				type Constraint struct {
					Name       string   `json:"name"`
					Label      string   `json:"label"`
					Properties []string `json:"properties"`
				}
				type Index struct {
					Name       string   `json:"name"`
					Label      string   `json:"label"`
					Properties []string `json:"properties"`
				}
				type Schema struct {
					NodeLabels    []NodeLabel    `json:"nodeLabels"`
					Relationships []Relationship `json:"relationships"`
					Constraints   []Constraint   `json:"constraints"`
					Indexes       []Index        `json:"indexes"`
				}

				var schema Schema
				if err := json.Unmarshal([]byte(body), &schema); err != nil {
					t.Fatalf("failed to unmarshal schema json: %v\nResponse body: %s", err, body)
				}

				// --- Validate Node Labels and Properties ---
				var personLabelFound, movieLabelFound bool
				for _, l := range schema.NodeLabels {
					if l.Name == "Person" {
						personLabelFound = true
						propFound := false
						for _, p := range l.Properties {
							if p.Name == "name" {
								propFound = true
								break
							}
						}
						if !propFound {
							t.Errorf("expected Person label to have 'name' property, but it was not found")
						}
					}
					if l.Name == "Movie" {
						movieLabelFound = true
						propFound := false
						for _, p := range l.Properties {
							if p.Name == "title" {
								propFound = true
								break
							}
						}
						if !propFound {
							t.Errorf("expected Movie label to have 'title' property, but it was not found")
						}
					}
				}
				if !personLabelFound {
					t.Error("expected to find 'Person' in nodeLabels")
				}
				if !movieLabelFound {
					t.Error("expected to find 'Movie' in nodeLabels")
				}

				// --- Validate Relationships ---
				relFound := false
				for _, r := range schema.Relationships {
					if r.Type == "ACTED_IN" && r.StartNode == "Person" && r.EndNode == "Movie" {
						relFound = true
						break
					}
				}
				if !relFound {
					t.Errorf("expected to find relationship '(:Person)-[:ACTED_IN]->(:Movie)', but it was not found")
				}

				// --- Validate Constraints ---
				constraintFound := false
				for _, c := range schema.Constraints {
					if c.Name == "PersonNameUnique" && c.Label == "Person" {
						propFound := false
						for _, p := range c.Properties {
							if p == "name" {
								propFound = true
								break
							}
						}
						if propFound {
							constraintFound = true
							break
						}
					}
				}
				if !constraintFound {
					t.Errorf("expected to find constraint 'PersonNameUnique' on Person(name), but it was not found")
				}

				// --- Validate Indexes ---
				indexFound := false
				for _, i := range schema.Indexes {
					if i.Name == "MovieTitleIndex" && i.Label == "Movie" {
						propFound := false
						for _, p := range i.Properties {
							if p == "title" {
								propFound = true
								break
							}
						}
						if propFound {
							indexFound = true
							break
						}
					}
				}
				if !indexFound {
					t.Errorf("expected to find index 'MovieTitleIndex' on Movie(title), but it was not found")
				}
			},
		},
	}
	for _, tc := range invokeTcs {
		t.Run(tc.name, func(t *testing.T) {
			// Prepare data if a preparation function is provided.
			if tc.prepareData != nil {
				tc.prepareData(t)
			}

			resp, err := http.Post(tc.api, "application/json", tc.requestBody)
			if err != nil {
				t.Fatalf("error when sending a request: %s", err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != tc.wantStatus {
				bodyBytes, _ := io.ReadAll(resp.Body)
				t.Fatalf("response status code: got %d, want %d: %s", resp.StatusCode, tc.wantStatus, string(bodyBytes))
			}

			if tc.wantStatus == http.StatusOK {
				var body map[string]any
				err = json.NewDecoder(resp.Body).Decode(&body)
				if err != nil {
					t.Fatalf("error parsing response body")
				}
				got, ok := body["result"].(string)
				if !ok {
					t.Fatalf("unable to find result in response body")
				}

				if tc.validateFunc != nil {
					// Use the custom validation function if provided.
					tc.validateFunc(t, got)
				} else if got != tc.want {
					// Otherwise, perform a direct string comparison.
					t.Fatalf("unexpected value: got %q, want %q", got, tc.want)
				}
			} else {
				bodyBytes, err := io.ReadAll(resp.Body)
				if err != nil {
					t.Fatalf("failed to read error response body: %s", err)
				}
				bodyString := string(bodyBytes)
				if !strings.Contains(bodyString, tc.wantErrorSubstring) {
					t.Fatalf("response body %q does not contain expected error %q", bodyString, tc.wantErrorSubstring)
				}
			}
		})
	}
}

```
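
For reference, the invoke pattern exercised above boils down to a JSON POST whose successful response wraps the payload in a `result` field. A minimal sketch against a locally running server (URL and tool name taken from the test above):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Post(
		"http://127.0.0.1:5000/api/tool/my-simple-cypher-tool/invoke",
		"application/json",
		bytes.NewBufferString(`{}`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	// On success, the payload arrives under "result",
	// e.g. [{"a":1}] for the simple cypher tool above.
	fmt.Println(body["result"])
}
```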

--------------------------------------------------------------------------------
/internal/tools/mssql/mssqllisttables/mssqllisttables.go:
--------------------------------------------------------------------------------

```go
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mssqllisttables

import (
	"context"
	"database/sql"
	"fmt"

	yaml "github.com/goccy/go-yaml"
	"github.com/googleapis/genai-toolbox/internal/sources"
	"github.com/googleapis/genai-toolbox/internal/sources/cloudsqlmssql"
	"github.com/googleapis/genai-toolbox/internal/sources/mssql"
	"github.com/googleapis/genai-toolbox/internal/tools"
)

const kind string = "mssql-list-tables"

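// listTablesStatement gathers metadata for user tables via CTEs
// (table_info, columns_info, constraints_info, indexes_info), optionally
// filtered by the comma-separated @table_names parameter.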
const listTablesStatement = `
	WITH table_info AS (
		SELECT
			t.object_id AS table_oid,
			s.name AS schema_name,
			t.name AS table_name,
			dp.name AS table_owner, -- Schema's owner principal name
			CAST(ep.value AS NVARCHAR(MAX)) AS table_comment, -- Cast for JSON compatibility
			CASE
				WHEN EXISTS ( -- Check if the table has more than one partition for any of its indexes or heap
					SELECT 1 FROM sys.partitions p
					WHERE p.object_id = t.object_id AND p.partition_number > 1
				) THEN 'PARTITIONED TABLE'
				ELSE 'TABLE'
			END AS object_type_detail
		FROM
			sys.tables t
		INNER JOIN
			sys.schemas s ON t.schema_id = s.schema_id
		LEFT JOIN
			sys.database_principals dp ON s.principal_id = dp.principal_id
		LEFT JOIN
			sys.extended_properties ep ON ep.major_id = t.object_id AND ep.minor_id = 0 AND ep.class = 1 AND ep.name = 'MS_Description'
		WHERE
			t.type = 'U' -- User tables
			AND s.name NOT IN ('sys', 'INFORMATION_SCHEMA', 'guest', 'db_owner', 'db_accessadmin', 'db_backupoperator', 'db_datareader', 'db_datawriter', 'db_ddladmin', 'db_denydatareader', 'db_denydatawriter', 'db_securityadmin')
			AND (@table_names IS NULL OR LTRIM(RTRIM(@table_names)) = '' OR t.name IN (SELECT LTRIM(RTRIM(value)) FROM STRING_SPLIT(@table_names, ',')))
	),
	columns_info AS (
		SELECT
			c.object_id AS table_oid,
			c.name AS column_name,
			CONCAT(
				UPPER(TY.name), -- Base type name
				CASE
					WHEN TY.name IN ('char', 'varchar', 'nchar', 'nvarchar', 'binary', 'varbinary') THEN
						CONCAT('(', IIF(c.max_length = -1, 'MAX', CAST(c.max_length / CASE WHEN TY.name IN ('nchar', 'nvarchar') THEN 2 ELSE 1 END AS VARCHAR(10))), ')')
					WHEN TY.name IN ('decimal', 'numeric') THEN
						CONCAT('(', c.precision, ',', c.scale, ')')
					WHEN TY.name IN ('datetime2', 'datetimeoffset', 'time') THEN
						CONCAT('(', c.scale, ')')
					ELSE ''
				END
			) AS data_type,
			c.column_id AS column_ordinal_position,
			IIF(c.is_nullable = 0, CAST(1 AS BIT), CAST(0 AS BIT)) AS is_not_nullable,
			dc.definition AS column_default,
			CAST(epc.value AS NVARCHAR(MAX)) AS column_comment
		FROM
			sys.columns c
		JOIN
			table_info ti ON c.object_id = ti.table_oid
		JOIN
			sys.types TY ON c.user_type_id = TY.user_type_id AND TY.is_user_defined = 0 -- Ensure we get base types
		LEFT JOIN
			sys.default_constraints dc ON c.object_id = dc.parent_object_id AND c.column_id = dc.parent_column_id
		LEFT JOIN
			sys.extended_properties epc ON epc.major_id = c.object_id AND epc.minor_id = c.column_id AND epc.class = 1 AND epc.name = 'MS_Description'
	),
	constraints_info AS (
		-- Primary Keys & Unique Constraints
		SELECT
			kc.parent_object_id AS table_oid,
			kc.name AS constraint_name,
			REPLACE(kc.type_desc, '_CONSTRAINT', '') AS constraint_type, -- 'PRIMARY_KEY', 'UNIQUE'
			STUFF((SELECT ', ' + col.name
				FROM sys.index_columns ic
				JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id
				WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id
				ORDER BY ic.key_ordinal
				FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS constraint_columns,
			NULL AS foreign_key_referenced_table,
			NULL AS foreign_key_referenced_columns,
			CASE kc.type
				WHEN 'PK' THEN 'PRIMARY KEY (' + STUFF((SELECT ', ' + col.name FROM sys.index_columns ic JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')'
				WHEN 'UQ' THEN 'UNIQUE (' + STUFF((SELECT ', ' + col.name FROM sys.index_columns ic JOIN sys.columns col ON ic.object_id = col.object_id AND ic.column_id = col.column_id WHERE ic.object_id = kc.parent_object_id AND ic.index_id = kc.unique_index_id ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')'
			END AS constraint_definition
		FROM sys.key_constraints kc
		JOIN table_info ti ON kc.parent_object_id = ti.table_oid
		UNION ALL
		-- Foreign Keys
		SELECT
			fk.parent_object_id AS table_oid,
			fk.name AS constraint_name,
			'FOREIGN KEY' AS constraint_type,
			STUFF((SELECT ', ' + pc.name
				FROM sys.foreign_key_columns fkc
				JOIN sys.columns pc ON fkc.parent_object_id = pc.object_id AND fkc.parent_column_id = pc.column_id
				WHERE fkc.constraint_object_id = fk.object_id
				ORDER BY fkc.constraint_column_id
				FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS constraint_columns,
			SCHEMA_NAME(rt.schema_id) + '.' + OBJECT_NAME(fk.referenced_object_id) AS foreign_key_referenced_table,
			STUFF((SELECT ', ' + rc.name
				FROM sys.foreign_key_columns fkc
				JOIN sys.columns rc ON fkc.referenced_object_id = rc.object_id AND fkc.referenced_column_id = rc.column_id
				WHERE fkc.constraint_object_id = fk.object_id
				ORDER BY fkc.constraint_column_id
				FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS foreign_key_referenced_columns,
			OBJECT_DEFINITION(fk.object_id) AS constraint_definition
		FROM sys.foreign_keys fk
		JOIN sys.tables rt ON fk.referenced_object_id = rt.object_id
		JOIN table_info ti ON fk.parent_object_id = ti.table_oid
		UNION ALL
		-- Check Constraints
		SELECT
			cc.parent_object_id AS table_oid,
			cc.name AS constraint_name,
			'CHECK' AS constraint_type,
			NULL AS constraint_columns, -- Definition includes column context
			NULL AS foreign_key_referenced_table,
			NULL AS foreign_key_referenced_columns,
			cc.definition AS constraint_definition
		FROM sys.check_constraints cc
		JOIN table_info ti ON cc.parent_object_id = ti.table_oid
	),
	indexes_info AS (
		SELECT
			i.object_id AS table_oid,
			i.name AS index_name,
			i.type_desc AS index_method, -- CLUSTERED, NONCLUSTERED, XML, etc.
			i.is_unique,
			i.is_primary_key AS is_primary,
			STUFF((SELECT ', ' + c.name
				FROM sys.index_columns ic
				JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
				WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 0
				ORDER BY ic.key_ordinal
				FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') AS index_columns,
			(
				'COLUMNS: (' + ISNULL(STUFF((SELECT ', ' + c.name + CASE WHEN ic.is_descending_key = 1 THEN ' DESC' ELSE '' END
										FROM sys.index_columns ic
										JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
										WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 0
										ORDER BY ic.key_ordinal FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, ''), 'N/A') + ')' +
				ISNULL(CHAR(13)+CHAR(10) + 'INCLUDE: (' + STUFF((SELECT ', ' + c.name
										FROM sys.index_columns ic
										JOIN sys.columns c ON i.object_id = c.object_id AND ic.column_id = c.column_id
										WHERE ic.object_id = i.object_id AND ic.index_id = i.index_id AND ic.is_included_column = 1
										ORDER BY ic.index_column_id FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)'), 1, 2, '') + ')', '') +
				ISNULL(CHAR(13)+CHAR(10) + 'FILTER: (' + i.filter_definition + ')', '')
			) AS index_definition_details
		FROM
			sys.indexes i
		JOIN
			table_info ti ON i.object_id = ti.table_oid
		WHERE i.type <> 0 -- Exclude Heaps
	AND i.name IS NOT NULL -- Exclude unnamed indexes (heaps are already filtered out by i.type <> 0 above)
	),
	triggers_info AS (
		SELECT
			tr.parent_id AS table_oid,
			tr.name AS trigger_name,
			OBJECT_DEFINITION(tr.object_id) AS trigger_definition,
			CASE tr.is_disabled WHEN 0 THEN 'ENABLED' ELSE 'DISABLED' END AS trigger_enabled_state
		FROM
			sys.triggers tr
		JOIN
			table_info ti ON tr.parent_id = ti.table_oid
		WHERE
			tr.is_ms_shipped = 0
			AND tr.parent_class_desc = 'OBJECT_OR_COLUMN' -- DML Triggers on tables/views
	)
	SELECT
		ti.schema_name,
		ti.table_name AS object_name,
		CASE
			WHEN @output_format = 'simple' THEN
				(SELECT ti.table_name AS name FOR JSON PATH, WITHOUT_ARRAY_WRAPPER)
			ELSE
				(
					SELECT
						ti.schema_name AS schema_name,
						ti.table_name AS object_name,
						ti.object_type_detail AS object_type,
						ti.table_owner AS owner,
						ti.table_comment AS comment,
						JSON_QUERY(ISNULL((
							SELECT
								ci.column_name,
								ci.data_type,
								ci.column_ordinal_position,
								ci.is_not_nullable,
								ci.column_default,
								ci.column_comment
							FROM columns_info ci
							WHERE ci.table_oid = ti.table_oid
							ORDER BY ci.column_ordinal_position
							FOR JSON PATH
						), '[]')) AS columns,
						JSON_QUERY(ISNULL((
							SELECT
								cons.constraint_name,
								cons.constraint_type,
								cons.constraint_definition,
								JSON_QUERY(
									CASE
										WHEN cons.constraint_columns IS NOT NULL AND LTRIM(RTRIM(cons.constraint_columns)) <> ''
										THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(cons.constraint_columns, ',')) + ']'
										ELSE '[]'
									END
								) AS constraint_columns,
								cons.foreign_key_referenced_table,
								JSON_QUERY(
									CASE
										WHEN cons.foreign_key_referenced_columns IS NOT NULL AND LTRIM(RTRIM(cons.foreign_key_referenced_columns)) <> ''
										THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(cons.foreign_key_referenced_columns, ',')) + ']'
										ELSE '[]'
									END
								) AS foreign_key_referenced_columns
							FROM constraints_info cons
							WHERE cons.table_oid = ti.table_oid
							FOR JSON PATH
						), '[]')) AS constraints,
						JSON_QUERY(ISNULL((
							SELECT
								ii.index_name,
								ii.index_definition_details AS index_definition,
								ii.is_unique,
								ii.is_primary,
								ii.index_method,
								JSON_QUERY(
									CASE
										WHEN ii.index_columns IS NOT NULL AND LTRIM(RTRIM(ii.index_columns)) <> ''
										THEN '[' + (SELECT STRING_AGG('"' + LTRIM(RTRIM(value)) + '"', ',') FROM STRING_SPLIT(ii.index_columns, ',')) + ']'
										ELSE '[]'
									END
								) AS index_columns
							FROM indexes_info ii
							WHERE ii.table_oid = ti.table_oid
							FOR JSON PATH
						), '[]')) AS indexes,
						JSON_QUERY(ISNULL((
							SELECT
								tri.trigger_name,
								tri.trigger_definition,
								tri.trigger_enabled_state
							FROM triggers_info tri
							WHERE tri.table_oid = ti.table_oid
							FOR JSON PATH
						), '[]')) AS triggers
					FOR JSON PATH, WITHOUT_ARRAY_WRAPPER -- Creates a single JSON object for this table's details
				)
		END AS object_details
	FROM
		table_info ti
	ORDER BY
		ti.schema_name, ti.table_name;
`

func init() {
	if !tools.Register(kind, newConfig) {
		panic(fmt.Sprintf("tool kind %q already registered", kind))
	}
}

func newConfig(ctx context.Context, name string, decoder *yaml.Decoder) (tools.ToolConfig, error) {
	actual := Config{Name: name}
	if err := decoder.DecodeContext(ctx, &actual); err != nil {
		return nil, err
	}
	return actual, nil
}

type compatibleSource interface {
	MSSQLDB() *sql.DB
}

// compile-time check that the compatible sources still satisfy the interface
var _ compatibleSource = &cloudsqlmssql.Source{}
var _ compatibleSource = &mssql.Source{}

var compatibleSources = [...]string{cloudsqlmssql.SourceKind, mssql.SourceKind}

type Config struct {
	Name         string   `yaml:"name" validate:"required"`
	Kind         string   `yaml:"kind" validate:"required"`
	Source       string   `yaml:"source" validate:"required"`
	Description  string   `yaml:"description" validate:"required"`
	AuthRequired []string `yaml:"authRequired"`
}

// validate interface
var _ tools.ToolConfig = Config{}

func (cfg Config) ToolConfigKind() string {
	return kind
}

func (cfg Config) Initialize(srcs map[string]sources.Source) (tools.Tool, error) {
	// verify source exists
	rawS, ok := srcs[cfg.Source]
	if !ok {
		return nil, fmt.Errorf("no source named %q configured", cfg.Source)
	}

	// verify the source is compatible
	s, ok := rawS.(compatibleSource)
	if !ok {
		return nil, fmt.Errorf("invalid source for %q tool: source kind must be one of %q", kind, compatibleSources)
	}

	allParameters := tools.Parameters{
		tools.NewStringParameterWithDefault("table_names", "", "Optional: A comma-separated list of table names. If empty, details for all tables will be listed."),
		tools.NewStringParameterWithDefault("output_format", "detailed", "Optional: Use 'simple' for names only or 'detailed' for full info."),
	}
	paramManifest := allParameters.Manifest()
	mcpManifest := tools.GetMcpManifest(cfg.Name, cfg.Description, cfg.AuthRequired, allParameters)

	// finish tool setup
	t := Tool{
		Name:         cfg.Name,
		Kind:         kind,
		AllParams:    allParameters,
		AuthRequired: cfg.AuthRequired,
		Db:           s.MSSQLDB(),
		manifest:     tools.Manifest{Description: cfg.Description, Parameters: paramManifest, AuthRequired: cfg.AuthRequired},
		mcpManifest:  mcpManifest,
	}
	return t, nil
}

// validate interface
var _ tools.Tool = Tool{}

type Tool struct {
	Name         string           `yaml:"name"`
	Kind         string           `yaml:"kind"`
	AuthRequired []string         `yaml:"authRequired"`
	AllParams    tools.Parameters `yaml:"allParams"`

	Db          *sql.DB
	manifest    tools.Manifest
	mcpManifest tools.McpManifest
}

func (t Tool) Invoke(ctx context.Context, params tools.ParamValues, accessToken tools.AccessToken) (any, error) {
	paramsMap := params.AsMap()

	outputFormat, _ := paramsMap["output_format"].(string)
	if outputFormat != "simple" && outputFormat != "detailed" {
		return nil, fmt.Errorf("invalid value for output_format: must be 'simple' or 'detailed', but got %q", outputFormat)
	}

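	// Bind the T-SQL named parameters @table_names and @output_format referenced in listTablesStatement.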
	namedArgs := []any{
		sql.Named("table_names", paramsMap["table_names"]),
		sql.Named("output_format", outputFormat),
	}

	rows, err := t.Db.QueryContext(ctx, listTablesStatement, namedArgs...)
	if err != nil {
		return nil, fmt.Errorf("unable to execute query: %w", err)
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch column names: %w", err)
	}

	// build reusable scan destinations: values[i] points at rawValues[i], so each row can be scanned in place
	rawValues := make([]any, len(cols))
	values := make([]any, len(cols))
	for i := range rawValues {
		values[i] = &rawValues[i]
	}

	var out []any
	for rows.Next() {
		err = rows.Scan(values...)
		if err != nil {
			return nil, fmt.Errorf("unable to parse row: %w", err)
		}
		vMap := make(map[string]any)
		for i, name := range cols {
			vMap[name] = rawValues[i]
		}
		out = append(out, vMap)
	}

	// Check if an error occurred during iteration
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("errors encountered during row iteration: %w", err)
	}

	return out, nil
}

func (t Tool) ParseParams(data map[string]any, claims map[string]map[string]any) (tools.ParamValues, error) {
	return tools.ParseParams(t.AllParams, data, claims)
}

func (t Tool) Manifest() tools.Manifest {
	return t.manifest
}

func (t Tool) McpManifest() tools.McpManifest {
	return t.mcpManifest
}

func (t Tool) Authorized(verifiedAuthServices []string) bool {
	return tools.IsAuthorized(t.AuthRequired, verifiedAuthServices)
}

func (t Tool) RequiresClientAuthorization() bool {
	return false
}

```
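
For context, here is a minimal sketch of how this tool might be driven once registered and initialized. It assumes `tool` is the `tools.Tool` returned by `Config.Initialize` above and that the configured SQL Server source is reachable; the `example` package, the `listUserTables` helper, and the table names are illustrative, not part of the repository:

```go
package example

import (
	"context"

	"github.com/googleapis/genai-toolbox/internal/tools"
)

// listUserTables is a hypothetical helper: `tool` is assumed to be the
// tools.Tool produced by the mssql-list-tables Config.Initialize.
func listUserTables(ctx context.Context, tool tools.Tool) (any, error) {
	// ParseParams validates the raw inputs against the tool's declared
	// parameters; no auth claims are needed here, so claims is nil.
	params, err := tool.ParseParams(map[string]any{
		"table_names":   "Customers,Orders", // hypothetical table names
		"output_format": "simple",           // or "detailed" for full metadata
	}, nil)
	if err != nil {
		return nil, err
	}
	// An empty access token is acceptable because
	// RequiresClientAuthorization reports false for this tool.
	return tool.Invoke(ctx, params, "")
}
```

Each element of the returned slice is a `map[string]any` with `schema_name`, `object_name`, and `object_details` keys, where `object_details` carries the JSON produced by the `FOR JSON PATH` clauses in `listTablesStatement`.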