This is page 1 of 8. Use http://codebase.md/bytebase/dbhub?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .github
│   ├── CODEOWNERS
│   ├── copilot-instructions.md
│   └── workflows
│       ├── docker-publish.yml
│       ├── npm-publish.yml
│       └── run-tests.yml
├── .gitignore
├── .husky
│   └── pre-commit
├── .npmrc
├── .prettierrc.json
├── bun.lock
├── CLAUDE.md
├── Dockerfile
├── LICENSE
├── llms-full.txt
├── package.json
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── README.md
├── resources
│   ├── employee-sqlite
│   │   ├── employee.sql
│   │   ├── load_department.sql
│   │   ├── load_dept_emp.sql
│   │   ├── load_dept_manager.sql
│   │   ├── load_employee.sql
│   │   ├── load_salary1.sql
│   │   ├── load_title.sql
│   │   ├── object.sql
│   │   ├── show_elapsed.sql
│   │   └── test_employee_md5.sql
│   └── images
│       ├── claude-desktop.webp
│       ├── cursor.webp
│       ├── logo-full.svg
│       ├── logo-full.webp
│       ├── logo-icon-only.svg
│       ├── logo-text-only.svg
│       └── mcp-inspector.webp
├── scripts
│   └── setup-husky.sh
├── src
│   ├── __tests__
│   │   └── json-rpc-integration.test.ts
│   ├── config
│   │   ├── __tests__
│   │   │   ├── env.test.ts
│   │   │   └── ssh-config-integration.test.ts
│   │   ├── demo-loader.ts
│   │   └── env.ts
│   ├── connectors
│   │   ├── __tests__
│   │   │   ├── mariadb.integration.test.ts
│   │   │   ├── mysql.integration.test.ts
│   │   │   ├── postgres-ssh.integration.test.ts
│   │   │   ├── postgres.integration.test.ts
│   │   │   ├── shared
│   │   │   │   └── integration-test-base.ts
│   │   │   ├── sqlite.integration.test.ts
│   │   │   └── sqlserver.integration.test.ts
│   │   ├── interface.ts
│   │   ├── manager.ts
│   │   ├── mariadb
│   │   │   └── index.ts
│   │   ├── mysql
│   │   │   └── index.ts
│   │   ├── postgres
│   │   │   └── index.ts
│   │   ├── sqlite
│   │   │   └── index.ts
│   │   └── sqlserver
│   │       └── index.ts
│   ├── index.ts
│   ├── prompts
│   │   ├── db-explainer.ts
│   │   ├── index.ts
│   │   └── sql-generator.ts
│   ├── resources
│   │   ├── index.ts
│   │   ├── indexes.ts
│   │   ├── procedures.ts
│   │   ├── schema.ts
│   │   ├── schemas.ts
│   │   └── tables.ts
│   ├── server.ts
│   ├── tools
│   │   ├── __tests__
│   │   │   └── execute-sql.test.ts
│   │   ├── execute-sql.ts
│   │   └── index.ts
│   ├── types
│   │   ├── sql.ts
│   │   └── ssh.ts
│   └── utils
│       ├── __tests__
│       │   ├── safe-url.test.ts
│       │   ├── ssh-config-parser.test.ts
│       │   └── ssh-tunnel.test.ts
│       ├── allowed-keywords.ts
│       ├── dsn-obfuscate.ts
│       ├── response-formatter.ts
│       ├── safe-url.ts
│       ├── sql-row-limiter.ts
│       ├── ssh-config-parser.ts
│       └── ssh-tunnel.ts
├── tsconfig.json
├── tsup.config.ts
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/.npmrc:
--------------------------------------------------------------------------------

```
1 | # Allow lifecycle scripts (husky setup, native module builds) to run during install
2 | ignore-scripts=false
3 | engine-strict=true
4 | hoist=true
5 | enable-pre-post-scripts=true
6 | auto-install-peers=true
```

--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "printWidth": 100,
 3 |   "tabWidth": 2,
 4 |   "useTabs": false,
 5 |   "semi": true,
 6 |   "singleQuote": false,
 7 |   "trailingComma": "es5",
 8 |   "bracketSpacing": true,
 9 |   "arrowParens": "always"
10 | }
11 | 
```

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------

```
 1 | # Git
 2 | .git
 3 | .gitignore
 4 | .github
 5 | 
 6 | # Node.js
 7 | node_modules
 8 | npm-debug.log
 9 | yarn-debug.log
10 | yarn-error.log
11 | pnpm-debug.log
12 | 
13 | # Build output
14 | dist
15 | node_modules
16 | 
17 | # Environment
18 | .env
19 | .env.*
20 | !.env.example
21 | 
22 | # Editor directories and files
23 | .vscode
24 | .idea
25 | *.suo
26 | *.ntvs*
27 | *.njsproj
28 | *.sln
29 | *.sw?
30 | 
31 | # OS generated files
32 | .DS_Store
33 | .DS_Store?
34 | ._*
35 | .Spotlight-V100
36 | .Trashes
37 | ehthumbs.db
38 | Thumbs.db
39 | 
40 | # Logs
41 | logs
42 | *.log
43 | 
44 | # Docker
45 | Dockerfile
46 | .dockerignore
47 | 
48 | # Project specific
49 | *.md
50 | !README.md
51 | LICENSE
52 | CLAUDE.md
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Dependencies
 2 | node_modules/
 3 | .pnp/
 4 | .pnp.js
 5 | vendor/
 6 | jspm_packages/
 7 | bower_components/
 8 | 
 9 | # Build outputs
10 | build/
11 | dist/
12 | out/
13 | *.min.js
14 | *.min.css
15 | 
16 | # Environment & config
17 | .env
18 | .env.local
19 | .env.development.local
20 | .env.test.local
21 | .env.production.local
22 | .venv
23 | env/
24 | venv/
25 | ENV/
26 | config.local.js
27 | *.local.json
28 | 
29 | # Logs
30 | logs
31 | *.log
32 | npm-debug.log*
33 | yarn-debug.log*
34 | yarn-error.log*
35 | lerna-debug.log*
36 | 
37 | # Cache and temp
38 | .npm
39 | .eslintcache
40 | .stylelintcache
41 | .cache/
42 | .parcel-cache/
43 | .next/
44 | .nuxt/
45 | .vuepress/dist
46 | .serverless/
47 | .fusebox/
48 | .dynamodb/
49 | .grunt
50 | .temp
51 | .tmp
52 | .sass-cache/
53 | __pycache__/
54 | *.py[cod]
55 | *$py.class
56 | .pytest_cache/
57 | .coverage
58 | htmlcov/
59 | coverage/
60 | .nyc_output/
61 | 
62 | # OS files
63 | .DS_Store
64 | Thumbs.db
65 | ehthumbs.db
66 | Desktop.ini
67 | $RECYCLE.BIN/
68 | *.lnk
69 | 
70 | # Editor directories and files
71 | .idea/
72 | .vscode/
73 | *.swp
74 | *.swo
75 | *~
76 | .*.sw[a-p]
77 | *.sublime-workspace
78 | *.sublime-project
79 | 
80 | # Compiled binaries
81 | *.com
82 | *.class
83 | *.dll
84 | *.exe
85 | *.o
86 | *.so 
```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
 1 | # DBHub Configuration
 2 | 
 3 | # Method 1: Connection String (DSN)
 4 | # Use one of these DSN formats:
 5 | # DSN=postgres://user:password@localhost:5432/dbname
 6 | # DSN=sqlite:///path/to/database.db
 7 | # DSN=sqlite::memory:
 8 | # DSN=sqlserver://user:password@localhost:1433/dbname
 9 | # DSN=mysql://user:password@localhost:3306/dbname
10 | DSN=
11 | 
12 | # Method 2: Individual Database Parameters
13 | # Use this method if your password contains special characters like @, :, /, #, etc.
14 | # that would break URL parsing in the DSN format above
15 | # DB_TYPE=postgres
16 | # DB_HOST=localhost
17 | # DB_PORT=5432
18 | # DB_USER=postgres
19 | # DB_PASSWORD=my@password:with/special#chars
20 | # DB_NAME=mydatabase
21 | 
22 | # Supported DB_TYPE values: postgres, mysql, mariadb, sqlserver, sqlite
23 | # DB_PORT is optional - defaults to standard port for each database type
24 | # For SQLite: only DB_TYPE and DB_NAME are required (DB_NAME is the file path)
25 | 
26 | # Transport configuration
27 | # --transport=stdio (default) for stdio transport
28 | # --transport=http for streamable HTTP transport (runs an HTTP server)
29 | TRANSPORT=stdio
30 | 
31 | # Server port for HTTP transport (default: 8080)
32 | PORT=8080
33 | 
34 | # SSH Tunnel Configuration (optional)
35 | # Use these settings to connect through an SSH bastion host
36 | # SSH_HOST=bastion.example.com
37 | # SSH_PORT=22
38 | # SSH_USER=ubuntu
39 | # SSH_PASSWORD=mypassword
40 | # SSH_KEY=~/.ssh/id_rsa
41 | # SSH_PASSPHRASE=mykeypassphrase
42 | 
43 | # Read-only mode (optional)
44 | # Set to true to restrict SQL execution to read-only operations
45 | # READONLY=false
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | > [!NOTE]  
  2 | > Brought to you by [Bytebase](https://www.bytebase.com/), the open-source database DevSecOps platform.
  3 | 
  4 | <p align="center">
  5 | <a href="https://dbhub.ai/" target="_blank">
  6 | <picture>
  7 |   <img src="https://raw.githubusercontent.com/bytebase/dbhub/main/resources/images/logo-full.webp" width="50%">
  8 | </picture>
  9 | </a>
 10 | </p>
 11 | 
 12 | <p align="center">
 13 |   <a href="https://discord.gg/BjEkZpsJzn"><img src="https://img.shields.io/badge/%20-Hang%20out%20on%20Discord-5865F2?style=for-the-badge&logo=discord&labelColor=EEEEEE" alt="Join our Discord" height="32" /></a>
 14 | </p>
 15 | 
 16 | <p>
 17 | Add DBHub to Cursor by copying the link below into your browser's address bar:
 18 | 
 19 | ```text
 20 | cursor://anysphere.cursor-deeplink/mcp/install?name=dbhub&config=eyJjb21tYW5kIjoibnB4IEBieXRlYmFzZS9kYmh1YiIsImVudiI6eyJUUkFOU1BPUlQiOiJzdGRpbyIsIkRTTiI6InBvc3RncmVzOi8vdXNlcjpwYXNzd29yZEBsb2NhbGhvc3Q6NTQzMi9kYm5hbWU%2Fc3NsbW9kZT1kaXNhYmxlIiwiUkVBRE9OTFkiOiJ0cnVlIn19
 21 | ```
 22 | 
 23 | </p>
 24 | 
 25 | DBHub is a universal database gateway implementing the Model Context Protocol (MCP) server interface. This gateway allows MCP-compatible clients to connect to and explore different databases.
 26 | 
 27 | ```bash
 28 |  +------------------+    +--------------+    +------------------+
 29 |  |                  |    |              |    |                  |
 30 |  |                  |    |              |    |                  |
 31 |  |  Claude Desktop  +--->+              +--->+    PostgreSQL    |
 32 |  |                  |    |              |    |                  |
 33 |  |  Claude Code     +--->+              +--->+    SQL Server    |
 34 |  |                  |    |              |    |                  |
 35 |  |  Cursor          +--->+    DBHub     +--->+    SQLite        |
 36 |  |                  |    |              |    |                  |
 37 |  |  Other Clients   +--->+              +--->+    MySQL         |
 38 |  |                  |    |              |    |                  |
 39 |  |                  |    |              +--->+    MariaDB       |
 40 |  |                  |    |              |    |                  |
 41 |  |                  |    |              |    |                  |
 42 |  +------------------+    +--------------+    +------------------+
 43 |       MCP Clients           MCP Server             Databases
 44 | ```
 45 | 
 46 | ## Supported Matrix
 47 | 
 48 | ### Database Resources
 49 | 
 50 | | Resource Name               | URI Format                                             | PostgreSQL | MySQL | MariaDB | SQL Server | SQLite |
 51 | | --------------------------- | ------------------------------------------------------ | :--------: | :---: | :-----: | :--------: | :----: |
 52 | | schemas                     | `db://schemas`                                         |     ✅     |  ✅   |   ✅    |     ✅     |   ✅   |
 53 | | tables_in_schema            | `db://schemas/{schemaName}/tables`                     |     ✅     |  ✅   |   ✅    |     ✅     |   ✅   |
 54 | | table_structure_in_schema   | `db://schemas/{schemaName}/tables/{tableName}`         |     ✅     |  ✅   |   ✅    |     ✅     |   ✅   |
 55 | | indexes_in_table            | `db://schemas/{schemaName}/tables/{tableName}/indexes` |     ✅     |  ✅   |   ✅    |     ✅     |   ✅   |
 56 | | procedures_in_schema        | `db://schemas/{schemaName}/procedures`                 |     ✅     |  ✅   |   ✅    |     ✅     |   ❌   |
 57 | | procedure_details_in_schema | `db://schemas/{schemaName}/procedures/{procedureName}` |     ✅     |  ✅   |   ✅    |     ✅     |   ❌   |
 58 | 
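For illustration, an MCP client can read any of these resources once connected to DBHub. Below is a minimal sketch using the TypeScript MCP SDK; the demo-mode command line and the `db://schemas` URI come from this README, while the client name and version are placeholders:

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Spawn DBHub in demo mode over stdio and read the schema list resource.
const transport = new StdioClientTransport({
  command: "npx",
  args: ["-y", "@bytebase/dbhub", "--transport", "stdio", "--demo"],
});
const client = new Client({ name: "example-client", version: "1.0.0" });
await client.connect(transport);

const result = await client.readResource({ uri: "db://schemas" });
console.log(result.contents); // JSON payload with { schemas, count }
```
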
 59 | ### Database Tools
 60 | 
 61 | | Tool        | Command Name  | Description                                                         | PostgreSQL | MySQL | MariaDB | SQL Server | SQLite |
 62 | | ----------- | ------------- | ------------------------------------------------------------------- | :--------: | :---: | :-----: | :--------: | ------ |
 63 | | Execute SQL | `execute_sql` | Execute single or multiple SQL statements (separated by semicolons) |     ✅     |  ✅   |   ✅    |     ✅     | ✅     |
 64 | 
 65 | ### Prompt Capabilities
 66 | 
 67 | | Prompt              | Command Name   | PostgreSQL | MySQL | MariaDB | SQL Server | SQLite |
 68 | | ------------------- | -------------- | :--------: | :---: | :-----: | :--------: | ------ |
 69 | | Generate SQL        | `generate_sql` |     ✅     |  ✅   |   ✅    |     ✅     | ✅     |
 70 | | Explain DB Elements | `explain_db`   |     ✅     |  ✅   |   ✅    |     ✅     | ✅     |
 71 | 
 72 | ## Installation
 73 | 
 74 | ### Docker
 75 | 
 76 | ```bash
 77 | # PostgreSQL example
 78 | docker run --rm --init \
 79 |    --name dbhub \
 80 |    --publish 8080:8080 \
 81 |    bytebase/dbhub \
 82 |    --transport http \
 83 |    --port 8080 \
 84 |    --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
 85 | ```
 86 | 
 87 | ```bash
 88 | # Demo mode with the SQLite sample employee database
 89 | docker run --rm --init \
 90 |    --name dbhub \
 91 |    --publish 8080:8080 \
 92 |    bytebase/dbhub \
 93 |    --transport http \
 94 |    --port 8080 \
 95 |    --demo
 96 | ```
 97 | 
 98 | **Docker Compose Setup:**
 99 | 
100 | If you're using Docker Compose for development, add DBHub to your `docker-compose.yml`:
101 | 
102 | ```yaml
103 | dbhub:
104 |   image: bytebase/dbhub:latest
105 |   container_name: dbhub
106 |   ports:
107 |     - "8080:8080"
108 |   environment:
109 |     - DBHUB_LOG_LEVEL=info
110 |   command:
111 |     - --transport
112 |     - http
113 |     - --port
114 |     - "8080"
115 |     - --dsn
116 |     - "postgres://user:password@database:5432/dbname"
117 |   depends_on:
118 |     - database
119 | ```
120 | 
121 | ### NPM
122 | 
123 | ```bash
124 | # PostgreSQL example
125 | npx @bytebase/dbhub --transport http --port 8080 --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
126 | 
127 | # Demo mode with the SQLite sample employee database
128 | npx @bytebase/dbhub --transport http --port 8080 --demo
129 | ```
135 | 
136 | > Note: The demo mode includes a bundled SQLite sample "employee" database with tables for employees, departments, salaries, and more.
137 | 
138 | ### Claude Desktop
139 | 
140 | ![claude-desktop](https://raw.githubusercontent.com/bytebase/dbhub/main/resources/images/claude-desktop.webp)
141 | 
142 | - Claude Desktop only supports the `stdio` transport: https://github.com/orgs/modelcontextprotocol/discussions/16
143 | 
144 | ```json
145 | // claude_desktop_config.json
146 | {
147 |   "mcpServers": {
148 |     "dbhub-postgres-docker": {
149 |       "command": "docker",
150 |       "args": [
151 |         "run",
152 |         "-i",
153 |         "--rm",
154 |         "bytebase/dbhub",
155 |         "--transport",
156 |         "stdio",
157 |         "--dsn",
158 |         // Use host.docker.internal as the host if connecting to the local db
159 |         "postgres://user:password@host.docker.internal:5432/dbname?sslmode=disable"
160 |       ]
161 |     },
162 |     "dbhub-postgres-npx": {
163 |       "command": "npx",
164 |       "args": [
165 |         "-y",
166 |         "@bytebase/dbhub",
167 |         "--transport",
168 |         "stdio",
169 |         "--dsn",
170 |         "postgres://user:password@localhost:5432/dbname?sslmode=disable"
171 |       ]
172 |     },
173 |     "dbhub-demo": {
174 |       "command": "npx",
175 |       "args": ["-y", "@bytebase/dbhub", "--transport", "stdio", "--demo"]
176 |     }
177 |   }
178 | }
179 | ```
180 | 
181 | ### Claude Code
182 | 
183 | Check https://docs.anthropic.com/en/docs/claude-code/mcp
184 | 
185 | ### Cursor
186 | 
187 | <p>
188 | Add DBHub to Cursor by copying the link below into your browser's address bar:
189 | 
190 | ```text
191 | cursor://anysphere.cursor-deeplink/mcp/install?name=dbhub&config=eyJjb21tYW5kIjoibnB4IEBieXRlYmFzZS9kYmh1YiIsImVudiI6eyJUUkFOU1BPUlQiOiJzdGRpbyIsIkRTTiI6InBvc3RncmVzOi8vdXNlcjpwYXNzd29yZEBsb2NhbGhvc3Q6NTQzMi9kYm5hbWU%2Fc3NsbW9kZT1kaXNhYmxlIiwiUkVBRE9OTFkiOiJ0cnVlIn19
192 | ```
193 | 
194 | </p>
195 | 
196 | ![cursor](https://raw.githubusercontent.com/bytebase/dbhub/main/resources/images/cursor.webp)
197 | 
198 | - Cursor supports both `stdio` and `http`.
199 | - Follow [Cursor MCP guide](https://docs.cursor.com/context/model-context-protocol) and make sure to use [Agent](https://docs.cursor.com/chat/agent) mode.
200 | 
201 | ### VSCode + Copilot
202 | 
203 | Check https://code.visualstudio.com/docs/copilot/customization/mcp-servers
204 | 
205 | VSCode with GitHub Copilot can connect to DBHub via both `stdio` and `http` transports. This enables AI agents to interact with your development database through a secure interface.
206 | 
207 | - VSCode supports both `stdio` and `http` transports
208 | - Configure MCP server in `.vscode/mcp.json`:
209 | 
210 | **Stdio Transport:**
211 | 
212 | ```json
213 | {
214 |   "servers": {
215 |     "dbhub": {
216 |       "command": "npx",
217 |       "args": [
218 |         "-y",
219 |         "@bytebase/dbhub",
220 |         "--transport",
221 |         "stdio",
222 |         "--dsn",
223 |         "postgres://user:password@localhost:5432/dbname"
224 |       ]
225 |     }
226 |   },
227 |   "inputs": []
228 | }
229 | ```
230 | 
231 | **HTTP Transport:**
232 | 
233 | ```json
234 | {
235 |   "servers": {
236 |     "dbhub": {
237 |       "url": "http://localhost:8080/message",
238 |       "type": "http"
239 |     }
240 |   },
241 |   "inputs": []
242 | }
243 | ```
244 | 
245 | **Copilot Instructions:**
246 | 
247 | You can provide Copilot with context by creating `.github/copilot-instructions.md`:
248 | 
249 | ```markdown
250 | ## Database Access
251 | 
252 | This project provides an MCP server (DBHub) for secure SQL access to the development database.
253 | 
254 | AI agents can execute SQL queries. In read-only mode (recommended for production):
255 | 
256 | - `SELECT * FROM users LIMIT 5;`
257 | - `SHOW TABLES;`
258 | - `DESCRIBE table_name;`
259 | 
260 | In read-write mode (development/testing):
261 | 
262 | - `INSERT INTO users (name, email) VALUES ('John', 'john@example.com');`
263 | - `UPDATE users SET status = 'active' WHERE id = 1;`
264 | - `CREATE TABLE test_table (id INT PRIMARY KEY);`
265 | 
266 | Use `--readonly` flag to restrict to read-only operations for safety.
267 | ```
268 | 
269 | ## Usage
270 | 
271 | ### Read-only Mode
272 | 
273 | You can run DBHub in read-only mode, which restricts SQL query execution to read-only operations:
274 | 
275 | ```bash
276 | # Enable read-only mode
277 | npx @bytebase/dbhub --readonly --dsn "postgres://user:password@localhost:5432/dbname"
278 | ```
279 | 
280 | In read-only mode, only [readonly SQL operations](https://github.com/bytebase/dbhub/blob/main/src/utils/allowed-keywords.ts) are allowed.
281 | 
282 | This provides an additional layer of security when connecting to production databases.
283 | 
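For illustration, the gist of that check can be sketched as follows. This is a simplification based on `src/utils/allowed-keywords.ts` (shown later on this page), not the full validation in `execute-sql.ts`:

```typescript
import { allowedKeywords } from "./src/utils/allowed-keywords.js";

// Simplified sketch: a statement passes read-only validation only when its
// leading keyword is in the allowlist for the active connector type.
function isReadOnly(statement: string, connector: keyof typeof allowedKeywords): boolean {
  const firstKeyword = statement.trim().split(/\s+/)[0]?.toLowerCase() ?? "";
  return allowedKeywords[connector].includes(firstKeyword);
}

isReadOnly("SELECT * FROM employee", "postgres"); // true
isReadOnly("DROP TABLE employee", "postgres");    // false
```
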
284 | ### Suffix Tool Names with ID
285 | 
286 | You can suffix tool names with a custom ID using the `--id` flag or `ID` environment variable. This is useful when running multiple DBHub instances (e.g., in Cursor) to allow the client to route queries to the correct database.
287 | 
288 | **Example configuration for multiple databases in Cursor:**
289 | 
290 | ```json
291 | {
292 |   "mcpServers": {
293 |     "dbhub-prod": {
294 |       "command": "npx",
295 |       "args": ["-y", "@bytebase/dbhub"],
296 |       "env": {
297 |         "ID": "prod",
298 |         "DSN": "postgres://user:password@prod-host:5432/dbname"
299 |       }
300 |     },
301 |     "dbhub-staging": {
302 |       "command": "npx",
303 |       "args": ["-y", "@bytebase/dbhub"],
304 |       "env": {
305 |         "ID": "staging",
306 |         "DSN": "mysql://user:password@staging-host:3306/dbname"
307 |       }
308 |     }
309 |   }
310 | }
311 | ```
312 | 
313 | With this configuration:
314 | 
315 | - Production database tools: `execute_sql_prod`
316 | - Staging database tools: `execute_sql_staging`
317 | 
318 | ### Row Limiting
319 | 
320 | You can limit the number of rows returned from SELECT queries using the `--max-rows` parameter. This helps prevent accidentally retrieving too much data from large tables:
321 | 
322 | ```bash
323 | # Limit SELECT queries to return at most 1000 rows
324 | npx @bytebase/dbhub --dsn "postgres://user:password@localhost:5432/dbname" --max-rows 1000
325 | ```
326 | 
327 | - Row limiting is only applied to SELECT statements, not INSERT/UPDATE/DELETE
328 | - If your query already has a `LIMIT` or `TOP` clause, DBHub uses the smaller value
329 | 
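A rough sketch of that clamping rule (the real logic lives in `src/utils/sql-row-limiter.ts`, which is not shown on this page and also handles SQL Server's `TOP`):

```typescript
// Simplified: clamp an existing LIMIT to maxRows, or append one when the
// SELECT has none. Illustrative only; not the actual sql-row-limiter code.
function applyRowLimit(selectSql: string, maxRows: number): string {
  const match = selectSql.match(/\bLIMIT\s+(\d+)/i);
  if (match) {
    const effective = Math.min(parseInt(match[1], 10), maxRows);
    return selectSql.replace(/\bLIMIT\s+\d+/i, `LIMIT ${effective}`);
  }
  return `${selectSql.replace(/;\s*$/, "")} LIMIT ${maxRows}`;
}

applyRowLimit("SELECT * FROM employee LIMIT 5000", 1000); // "... LIMIT 1000"
```
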
330 | ### SSL Connections
331 | 
332 | You can specify the SSL mode using the `sslmode` parameter in your DSN string:
333 | 
334 | | Database   | `sslmode=disable` | `sslmode=require` |   Default SSL Behavior   |
335 | | ---------- | :---------------: | :---------------: | :----------------------: |
336 | | PostgreSQL |        ✅         |        ✅         | Certificate verification |
337 | | MySQL      |        ✅         |        ✅         | Certificate verification |
338 | | MariaDB    |        ✅         |        ✅         | Certificate verification |
339 | | SQL Server |        ✅         |        ✅         | Certificate verification |
340 | | SQLite     |        ❌         |        ❌         |     N/A (file-based)     |
341 | 
342 | **SSL Mode Options:**
343 | 
344 | - `sslmode=disable`: All SSL/TLS encryption is turned off. Data is transmitted in plaintext.
345 | - `sslmode=require`: Connection is encrypted, but the server's certificate is not verified. This provides protection against packet sniffing but not against man-in-the-middle attacks. You may use this for trusted self-signed CA.
346 | 
347 | Without specifying `sslmode`, most databases default to certificate verification, which provides the highest level of security.
348 | 
349 | Example usage:
350 | 
351 | ```bash
352 | # Disable SSL
353 | postgres://user:password@localhost:5432/dbname?sslmode=disable
354 | 
355 | # Require SSL without certificate verification
356 | postgres://user:password@localhost:5432/dbname?sslmode=require
357 | 
358 | # Standard SSL with certificate verification (default)
359 | postgres://user:password@localhost:5432/dbname
360 | ```
361 | 
362 | ### SSH Tunnel Support
363 | 
364 | DBHub supports connecting to databases through SSH tunnels, enabling secure access to databases in private networks or behind firewalls.
365 | 
366 | #### Using SSH Config File (Recommended)
367 | 
368 | DBHub can read SSH connection settings from your `~/.ssh/config` file. Simply use the host alias from your SSH config:
369 | 
370 | ```bash
371 | # If you have this in ~/.ssh/config:
372 | # Host mybastion
373 | #   HostName bastion.example.com
374 | #   User ubuntu
375 | #   IdentityFile ~/.ssh/id_rsa
376 | 
377 | npx @bytebase/dbhub \
378 |   --dsn "postgres://dbuser:password@db.internal:5432/mydb" \
379 |   --ssh-host mybastion
380 | ```
381 | 
382 | DBHub will automatically use the settings from your SSH config, including hostname, user, port, and identity file. If no identity file is specified in the config, DBHub will try common default locations (`~/.ssh/id_rsa`, `~/.ssh/id_ed25519`, etc.).
383 | 
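Under the hood this relies on the `ssh-config` parser (see `src/utils/ssh-config-parser.ts`). A minimal sketch of the lookup it performs, assuming the `ssh-config` npm package:

```typescript
import { readFileSync } from "fs";
import { homedir } from "os";
import path from "path";
import SSHConfig from "ssh-config";

// Compute the effective settings for a host alias such as "mybastion",
// the same resolution `ssh mybastion` would apply.
const configText = readFileSync(path.join(homedir(), ".ssh", "config"), "utf8");
const settings = SSHConfig.parse(configText).compute("mybastion");

// HostName and User are plain strings; IdentityFile may be a list.
console.log(settings.HostName, settings.User, settings.IdentityFile);
```
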
384 | #### SSH with Password Authentication
385 | 
386 | ```bash
387 | npx @bytebase/dbhub \
388 |   --dsn "postgres://dbuser:password@db.internal:5432/mydb" \
389 |   --ssh-host bastion.example.com \
390 |   --ssh-user ubuntu \
391 |   --ssh-password mypassword
392 | ```
393 | 
394 | #### SSH with Private Key Authentication
395 | 
396 | ```bash
397 | npx @bytebase/dbhub \
398 |   --dsn "postgres://dbuser:password@db.internal:5432/mydb" \
399 |   --ssh-host bastion.example.com \
400 |   --ssh-user ubuntu \
401 |   --ssh-key ~/.ssh/id_rsa
402 | ```
403 | 
404 | #### SSH with Private Key and Passphrase
405 | 
406 | ```bash
407 | npx @bytebase/dbhub \
408 |   --dsn "postgres://dbuser:password@db.internal:5432/mydb" \
409 |   --ssh-host bastion.example.com \
410 |   --ssh-port 2222 \
411 |   --ssh-user ubuntu \
412 |   --ssh-key ~/.ssh/id_rsa \
413 |   --ssh-passphrase mykeypassphrase
414 | ```
415 | 
416 | #### Using Environment Variables
417 | 
418 | ```bash
419 | export SSH_HOST=bastion.example.com
420 | export SSH_USER=ubuntu
421 | export SSH_KEY=~/.ssh/id_rsa
422 | npx @bytebase/dbhub --dsn "postgres://dbuser:password@db.internal:5432/mydb"
423 | ```
424 | 
425 | **Note**: When using SSH tunnels, the database host in your DSN should be the hostname/IP as seen from the SSH server (bastion host), not from your local machine.
426 | 
427 | ### Configure your database connection
428 | 
429 | You can use DBHub in demo mode with a sample employee database for testing:
430 | 
431 | ```bash
432 | npx @bytebase/dbhub --demo
433 | ```
434 | 
435 | > [!WARNING]
436 | > If your user/password contains special characters, you have two options:
437 | >
438 | > 1. Escape them in the DSN (e.g. `pass#word` should be escaped as `pass%23word`)
439 | > 2. Use the individual database parameters method below (recommended)
440 | 
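For option 1, Node's built-in `encodeURIComponent` produces the required percent-encoding; a minimal sketch:

```typescript
// Percent-encode credentials before embedding them in a DSN.
const user = "myuser";
const password = "pass#word"; // '#' would otherwise break URL parsing
const dsn = `postgres://${encodeURIComponent(user)}:${encodeURIComponent(password)}@localhost:5432/dbname`;
console.log(dsn); // postgres://myuser:pass%23word@localhost:5432/dbname
```
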
441 | For real databases, you can configure the database connection in two ways:
442 | 
443 | #### Method 1: Database Source Name (DSN)
444 | 
445 | - **Command line argument** (highest priority):
446 | 
447 |   ```bash
448 |   npx @bytebase/dbhub --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
449 |   ```
450 | 
451 | - **Environment variable** (second priority):
452 | 
453 |   ```bash
454 |   export DSN="postgres://user:password@localhost:5432/dbname?sslmode=disable"
455 |   npx @bytebase/dbhub
456 |   ```
457 | 
458 | - **Environment file** (third priority):
459 |   - For development: Create `.env.local` with your DSN
460 |   - For production: Create `.env` with your DSN
461 |   ```
462 |   DSN=postgres://user:password@localhost:5432/dbname?sslmode=disable
463 |   ```
464 | 
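That precedence can be sketched as follows; this is an illustration of the described order, not the actual resolution code in `src/config/env.ts` (it assumes `dotenv` for the env-file step):

```typescript
import { config as loadEnv } from "dotenv";

// Resolve the DSN: --dsn flag, then the DSN env var, then .env.local,
// then .env. dotenv never overwrites variables that are already set,
// so loading .env.local first gives it priority over .env.
function resolveDsn(argv: string[]): string | undefined {
  const flagIndex = argv.indexOf("--dsn");
  if (flagIndex !== -1 && argv[flagIndex + 1]) return argv[flagIndex + 1];
  loadEnv({ path: ".env.local" });
  loadEnv({ path: ".env" });
  return process.env.DSN;
}

console.log(resolveDsn(process.argv.slice(2)));
```
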
465 | #### Method 2: Individual Database Parameters
466 | 
467 | If your password contains special characters that would break URL parsing, use individual environment variables instead:
468 | 
469 | - **Environment variables**:
470 | 
471 |   ```bash
472 |   export DB_TYPE=postgres
473 |   export DB_HOST=localhost
474 |   export DB_PORT=5432
475 |   export DB_USER=myuser
476 |   export DB_PASSWORD='my@complex:password/with#special&chars'
477 |   export DB_NAME=mydatabase
478 |   npx @bytebase/dbhub
479 |   ```
480 | 
481 | - **Environment file**:
482 |   ```
483 |   DB_TYPE=postgres
484 |   DB_HOST=localhost
485 |   DB_PORT=5432
486 |   DB_USER=myuser
487 |   DB_PASSWORD=my@complex:password/with#special&chars
488 |   DB_NAME=mydatabase
489 |   ```
490 | 
491 | **Supported DB_TYPE values**: `postgres`, `mysql`, `mariadb`, `sqlserver`, `sqlite`
492 | 
493 | **Default ports** (when DB_PORT is omitted):
494 | 
495 | - PostgreSQL: `5432`
496 | - MySQL/MariaDB: `3306`
497 | - SQL Server: `1433`
498 | 
499 | **For SQLite**: Only `DB_TYPE=sqlite` and `DB_NAME=/path/to/database.db` are required.
500 | 
501 | > [!TIP]
502 | > Use the individual parameter method when your password contains special characters like `@`, `:`, `/`, `#`, `&`, `=` that would break DSN parsing.
503 | 
504 | > [!WARNING]
505 | > When running in Docker, use `host.docker.internal` instead of `localhost` to connect to databases running on your host machine. For example: `mysql://user:password@host.docker.internal:3306/dbname`
506 | 
507 | DBHub supports the following database connection string formats:
508 | 
509 | | Database   | DSN Format                                               | Example                                                                                                        |
510 | | ---------- | -------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- |
511 | | MySQL      | `mysql://[user]:[password]@[host]:[port]/[database]`     | `mysql://user:password@localhost:3306/dbname?sslmode=disable`                                                  |
512 | | MariaDB    | `mariadb://[user]:[password]@[host]:[port]/[database]`   | `mariadb://user:password@localhost:3306/dbname?sslmode=disable`                                                |
513 | | PostgreSQL | `postgres://[user]:[password]@[host]:[port]/[database]`  | `postgres://user:password@localhost:5432/dbname?sslmode=disable`                                               |
514 | | SQL Server | `sqlserver://[user]:[password]@[host]:[port]/[database]` | `sqlserver://user:password@localhost:1433/dbname?sslmode=disable`                                              |
515 | | SQLite     | `sqlite:///[path/to/file]` or `sqlite:///:memory:`       | `sqlite:///path/to/database.db`, `sqlite:C:/Users/YourName/data/database.db` (Windows), or `sqlite:///:memory:` |
516 | 
517 | #### SQL Server
518 | 
519 | Extra query parameters:
520 | 
521 | ##### authentication
522 | 
523 | - `authentication=azure-active-directory-access-token`. Only applicable when running from Azure. See [DefaultAzureCredential](https://learn.microsoft.com/en-us/azure/developer/javascript/sdk/authentication/credential-chains#use-defaultazurecredential-for-flexibility).
524 | 
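For example, a DSN using this parameter might look like the following (hypothetical server and database names):

```typescript
// Token-based Azure AD authentication: the token is sourced from the Azure
// environment rather than embedded in the DSN.
const dsn =
  "sqlserver://myserver.database.windows.net:1433/mydb?authentication=azure-active-directory-access-token";
```
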
525 | ### Transport
526 | 
527 | - **stdio** (default) - for direct integration with tools like Claude Desktop:
528 | 
529 |   ```bash
530 |   npx @bytebase/dbhub --transport stdio --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
531 |   ```
532 | 
533 | - **http** - for browser and network clients:
534 |   ```bash
535 |   npx @bytebase/dbhub --transport http --port 5678 --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
536 |   ```
537 | 
538 | ### Command line options
539 | 
540 | | Option         | Environment Variable | Description                                                           | Default                      |
541 | | -------------- | -------------------- | --------------------------------------------------------------------- | ---------------------------- |
542 | | dsn            | `DSN`                | Database connection string                                            | Required if not in demo mode |
543 | | N/A            | `DB_TYPE`            | Database type: `postgres`, `mysql`, `mariadb`, `sqlserver`, `sqlite`  | N/A                          |
544 | | N/A            | `DB_HOST`            | Database server hostname (not needed for SQLite)                      | N/A                          |
545 | | N/A            | `DB_PORT`            | Database server port (uses default if omitted, not needed for SQLite) | N/A                          |
546 | | N/A            | `DB_USER`            | Database username (not needed for SQLite)                             | N/A                          |
547 | | N/A            | `DB_PASSWORD`        | Database password (not needed for SQLite)                             | N/A                          |
548 | | N/A            | `DB_NAME`            | Database name or SQLite file path                                     | N/A                          |
549 | | transport      | `TRANSPORT`          | Transport mode: `stdio` or `http`                                     | `stdio`                      |
550 | | port           | `PORT`               | HTTP server port (only applicable when using `--transport=http`)      | `8080`                       |
551 | | readonly       | `READONLY`           | Restrict SQL execution to read-only operations                        | `false`                      |
552 | | max-rows       | N/A                  | Limit the number of rows returned from SELECT queries                 | No limit                     |
553 | | demo           | N/A                  | Run in demo mode with sample employee database                        | `false`                      |
554 | | id             | `ID`                 | Instance identifier to suffix tool names (for multi-instance)         | N/A                          |
555 | | ssh-host       | `SSH_HOST`           | SSH server hostname for tunnel connection                             | N/A                          |
556 | | ssh-port       | `SSH_PORT`           | SSH server port                                                       | `22`                         |
557 | | ssh-user       | `SSH_USER`           | SSH username                                                          | N/A                          |
558 | | ssh-password   | `SSH_PASSWORD`       | SSH password (for password authentication)                            | N/A                          |
559 | | ssh-key        | `SSH_KEY`            | Path to SSH private key file                                          | N/A                          |
560 | | ssh-passphrase | `SSH_PASSPHRASE`     | Passphrase for SSH private key                                        | N/A                          |
561 | 
562 | The demo mode uses an in-memory SQLite database loaded with the [sample employee database](https://github.com/bytebase/dbhub/tree/main/resources/employee-sqlite) that includes tables for employees, departments, titles, salaries, department employees, and department managers. The sample database includes SQL scripts for table creation, data loading, and testing.
563 | 
564 | ## Development
565 | 
566 | 1. Install dependencies:
567 | 
568 |    ```bash
569 |    pnpm install
570 |    ```
571 | 
572 | 1. Run in development mode:
573 | 
574 |    ```bash
575 |    pnpm dev
576 |    ```
577 | 
578 | 1. Build for production:
579 |    ```bash
580 |    pnpm build
581 |    pnpm start --transport stdio --dsn "postgres://user:password@localhost:5432/dbname?sslmode=disable"
582 |    ```
583 | 
584 | ### Testing
585 | 
586 | The project uses Vitest for comprehensive unit and integration testing:
587 | 
588 | - **Run all tests**: `pnpm test`
589 | - **Run tests in watch mode**: `pnpm test:watch`
590 | - **Run integration tests**: `pnpm test:integration`
591 | 
592 | #### Integration Tests
593 | 
594 | DBHub includes comprehensive integration tests for all supported database connectors using [Testcontainers](https://testcontainers.com/). These tests run against real database instances in Docker containers, ensuring full compatibility and feature coverage.
595 | 
596 | ##### Prerequisites
597 | 
598 | - **Docker**: Ensure Docker is installed and running on your machine
599 | - **Docker Resources**: Allocate sufficient memory (recommended: 4GB+) for multiple database containers
600 | - **Network Access**: Ability to pull Docker images from registries
601 | 
602 | ##### Running Integration Tests
603 | 
604 | **Note**: This command runs all integration tests in parallel, which may take 5-15 minutes depending on your system resources and network speed.
605 | 
606 | ```bash
607 | # Run all database integration tests
608 | pnpm test:integration
609 | ```
610 | 
611 | ```bash
612 | # Run only PostgreSQL integration tests
613 | pnpm test src/connectors/__tests__/postgres.integration.test.ts
614 | # Run only MySQL integration tests
615 | pnpm test src/connectors/__tests__/mysql.integration.test.ts
616 | # Run only MariaDB integration tests
617 | pnpm test src/connectors/__tests__/mariadb.integration.test.ts
618 | # Run only SQL Server integration tests
619 | pnpm test src/connectors/__tests__/sqlserver.integration.test.ts
620 | # Run only SQLite integration tests
621 | pnpm test src/connectors/__tests__/sqlite.integration.test.ts
622 | # Run JSON RPC integration tests
623 | pnpm test src/__tests__/json-rpc-integration.test.ts
624 | ```
625 | 
626 | All integration tests follow these patterns:
627 | 
628 | 1. **Container Lifecycle**: Start database container → Connect → Setup test data → Run tests → Cleanup
629 | 2. **Shared Test Utilities**: Common test patterns implemented in `IntegrationTestBase` class
630 | 3. **Database-Specific Features**: Each database includes tests for unique features and capabilities
631 | 4. **Error Handling**: Comprehensive testing of connection errors, invalid SQL, and edge cases
632 | 
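As a concrete sketch of pattern 1, a Testcontainers-based test looks roughly like this (a simplified illustration, not the actual `IntegrationTestBase` code):

```typescript
import { describe, it, beforeAll, afterAll, expect } from "vitest";
import { PostgreSqlContainer, StartedPostgreSqlContainer } from "@testcontainers/postgresql";
import { Client } from "pg";

describe("PostgreSQL connector (sketch)", () => {
  let container: StartedPostgreSqlContainer;
  let client: Client;

  beforeAll(async () => {
    // Start the container, connect, and set up test data.
    container = await new PostgreSqlContainer("postgres:15-alpine").start();
    client = new Client({ connectionString: container.getConnectionUri() });
    await client.connect();
    await client.query("CREATE TABLE t (id INT PRIMARY KEY)");
  }, 120_000);

  afterAll(async () => {
    // Cleanup: close the connection, then stop the container.
    await client.end();
    await container.stop();
  });

  it("runs a query", async () => {
    const result = await client.query("SELECT 1 AS one");
    expect(result.rows[0].one).toBe(1);
  });
});
```
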
633 | ##### Troubleshooting Integration Tests
634 | 
635 | **Container Startup Issues:**
636 | 
637 | ```bash
638 | # Check Docker is running
639 | docker ps
640 | 
641 | # Check available memory
642 | docker system df
643 | 
644 | # Pull images manually if needed
645 | docker pull postgres:15-alpine
646 | docker pull mysql:8.0
647 | docker pull mariadb:10.11
648 | docker pull mcr.microsoft.com/mssql/server:2019-latest
649 | ```
650 | 
651 | **SQL Server Timeout Issues:**
652 | 
653 | - SQL Server containers require significant startup time (3-5 minutes)
654 | - Ensure Docker has sufficient memory allocated (4GB+ recommended)
655 | - Consider running SQL Server tests separately if experiencing timeouts
656 | 
657 | **Network/Resource Issues:**
658 | 
659 | ```bash
660 | # Run tests with verbose output
661 | pnpm test:integration --reporter=verbose
662 | 
663 | # Run single database test to isolate issues
664 | pnpm test:integration -- --testNamePattern="PostgreSQL"
665 | 
666 | # Check Docker container logs if tests fail
667 | docker logs <container_id>
668 | ```
669 | 
670 | #### Pre-commit Hooks (for Developers)
671 | 
672 | The project includes pre-commit hooks to run tests automatically before each commit:
673 | 
674 | 1. After cloning the repository, set up the pre-commit hooks:
675 | 
676 |    ```bash
677 |    ./scripts/setup-husky.sh
678 |    ```
679 | 
680 | 2. This ensures the test suite runs automatically whenever you create a commit, preventing commits that would break tests.
681 | 
682 | ### Debug with [MCP Inspector](https://github.com/modelcontextprotocol/inspector)
683 | 
684 | ![mcp-inspector](https://raw.githubusercontent.com/bytebase/dbhub/main/resources/images/mcp-inspector.webp)
685 | 
686 | #### stdio
687 | 
688 | ```bash
689 | # PostgreSQL example
690 | TRANSPORT=stdio DSN="postgres://user:password@localhost:5432/dbname?sslmode=disable" npx @modelcontextprotocol/inspector node /path/to/dbhub/dist/index.js
691 | ```
692 | 
693 | #### HTTP
694 | 
695 | ```bash
696 | # Start DBHub with HTTP transport
697 | pnpm dev --transport=http --port=8080
698 | 
699 | # Start the MCP Inspector in another terminal
700 | npx @modelcontextprotocol/inspector
701 | ```
702 | 
703 | Then connect the MCP Inspector to the DBHub server's `/message` endpoint.
704 | 
705 | ## Contributors
706 | 
707 | <a href="https://github.com/bytebase/dbhub/graphs/contributors">
708 |   <img src="https://contrib.rocks/image?repo=bytebase/dbhub" />
709 | </a>
710 | 
711 | ## Star History
712 | 
713 | [![Star History Chart](https://api.star-history.com/svg?repos=bytebase/dbhub&type=Date)](https://www.star-history.com/#bytebase/dbhub&Date)
714 | 
```

--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------

```markdown
  1 | # CLAUDE.md
  2 | 
  3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
  4 | 
  5 | # DBHub Development Guidelines
  6 | 
  7 | DBHub is a Universal Database Gateway implementing the Model Context Protocol (MCP) server interface. It bridges MCP-compatible clients (Claude Desktop, Claude Code, Cursor) with various database systems.
  8 | 
  9 | ## Commands
 10 | 
 11 | - Build: `pnpm run build` - Compiles TypeScript to JavaScript using tsup
 12 | - Start: `pnpm run start` - Runs the compiled server
 13 | - Dev: `pnpm run dev` - Runs server with tsx (no compilation needed)
 14 | - Test: `pnpm test` - Run all tests
 15 | - Test Watch: `pnpm test:watch` - Run tests in watch mode
 16 | - Integration Tests: `pnpm test:integration` - Run database integration tests (requires Docker)
 17 | - Pre-commit: `./scripts/setup-husky.sh` - Setup git hooks for automated testing
 18 | 
 19 | ## Architecture Overview
 20 | 
 21 | The codebase follows a modular architecture centered around the MCP protocol:
 22 | 
 23 | ```
 24 | src/
 25 | ├── connectors/          # Database-specific implementations
 26 | │   ├── postgres/        # PostgreSQL connector
 27 | │   ├── mysql/           # MySQL connector
 28 | │   ├── mariadb/         # MariaDB connector
 29 | │   ├── sqlserver/       # SQL Server connector
 30 | │   └── sqlite/          # SQLite connector
 31 | ├── resources/           # MCP resource handlers (DB exploration)
 32 | │   ├── schemas.ts       # Schema listing
 33 | │   ├── tables.ts        # Table exploration
 34 | │   ├── indexes.ts       # Index information
 35 | │   └── procedures.ts    # Stored procedures
 36 | ├── tools/               # MCP tool handlers
 37 | │   └── execute-sql.ts   # SQL execution handler
 38 | ├── prompts/             # AI prompt handlers
 39 | │   ├── sql-generator.ts # SQL generation
 40 | │   └── db-explainer.ts  # Database explanation
 41 | ├── utils/               # Shared utilities
 42 | │   ├── dsn-obfuscate.ts # DSN security
 43 | │   ├── response-formatter.ts # Output formatting
 44 | │   └── allowed-keywords.ts  # Read-only SQL validation
 45 | └── index.ts             # Entry point with transport handling
 46 | ```
 47 | 
 48 | Key architectural patterns:
 49 | - **Connector Registry**: Dynamic registration system for database connectors
 50 | - **Transport Abstraction**: Support for both stdio (desktop tools) and HTTP (network clients)
 51 | - **Resource/Tool/Prompt Handlers**: Clean separation of MCP protocol concerns
 52 | - **Integration Test Base**: Shared test utilities for consistent connector testing
 53 | 
 54 | ## Environment
 55 | 
 56 | - Copy `.env.example` to `.env` and configure for your database connection
 57 | - Two ways to configure:
 58 |   - Set `DSN` to a full connection string (recommended)
 59 |   - Set `DB_TYPE` and the individual `DB_*` parameters (see `.env.example`)
 60 | - Transport options:
 61 |   - Set `--transport=stdio` (default) for stdio transport
 62 |   - Set `--transport=http` for streamable HTTP transport (runs an HTTP server)
 63 | - Demo mode: Use `--demo` flag for bundled SQLite employee database
 64 | - Read-only mode: Use `--readonly` flag to restrict to read-only SQL operations
 65 | 
 66 | ## Database Connectors
 67 | 
 68 | - Add new connectors in `src/connectors/{db-type}/index.ts`
 69 | - Implement the `Connector` and `DSNParser` interfaces from `src/connectors/interface.ts`
 70 | - Register connector with `ConnectorRegistry.register(connector)`
 71 | - DSN Examples:
 72 |   - PostgreSQL: `postgres://user:password@localhost:5432/dbname?sslmode=disable`
 73 |   - MySQL: `mysql://user:password@localhost:3306/dbname?sslmode=disable`
 74 |   - MariaDB: `mariadb://user:password@localhost:3306/dbname?sslmode=disable`
 75 |   - SQL Server: `sqlserver://user:password@localhost:1433/dbname?sslmode=disable`
 76 |   - SQLite: `sqlite:///path/to/database.db` or `sqlite:///:memory:`
 77 | - SSL modes: `sslmode=disable` (no SSL) or `sslmode=require` (SSL without cert verification)
 78 | 
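A skeleton of a new connector, using an illustrative (not actual) subset of the interface — the real method set is defined in `src/connectors/interface.ts`:

```typescript
// Illustrative subset only; the real Connector interface also covers
// table, index, and stored procedure introspection.
interface ConnectorSketch {
  id: string;
  connect(dsn: string): Promise<void>;
  getSchemas(): Promise<string[]>;
  executeSQL(sql: string): Promise<unknown>;
}

class ExampleConnector implements ConnectorSketch {
  id = "exampledb";
  async connect(_dsn: string): Promise<void> {
    // Parse the DSN and open the underlying database handle here.
  }
  async getSchemas(): Promise<string[]> {
    return ["main"];
  }
  async executeSQL(_sql: string): Promise<unknown> {
    return []; // Run the statement and return its rows.
  }
}

// Actual registration call per the steps above:
// ConnectorRegistry.register(new ExampleConnector());
```
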
 79 | ## Testing Approach
 80 | 
 81 | - Unit tests for individual components and utilities
 82 | - Integration tests using Testcontainers for real database testing
 83 | - All connectors have comprehensive integration test coverage
 84 | - Pre-commit hooks run related tests automatically
 85 | - Test specific databases: `pnpm test src/connectors/__tests__/{db-type}.integration.test.ts`
 86 | - SSH tunnel tests: `pnpm test postgres-ssh.integration.test.ts`
 87 | 
 88 | ## SSH Tunnel Support
 89 | 
 90 | DBHub supports SSH tunnels for secure database connections through bastion hosts:
 91 | 
 92 | - Configuration via command-line options: `--ssh-host`, `--ssh-port`, `--ssh-user`, `--ssh-password`, `--ssh-key`, `--ssh-passphrase`
 93 | - Configuration via environment variables: `SSH_HOST`, `SSH_PORT`, `SSH_USER`, `SSH_PASSWORD`, `SSH_KEY`, `SSH_PASSPHRASE`
 94 | - SSH config file support: Automatically reads from `~/.ssh/config` when using host aliases
 95 | - Implementation in `src/utils/ssh-tunnel.ts` using the `ssh2` library
 96 | - SSH config parsing in `src/utils/ssh-config-parser.ts` using the `ssh-config` library
 97 | - Automatic tunnel establishment when SSH config is detected
 98 | - Support for both password and key-based authentication
 99 | - Default SSH key detection (tries `~/.ssh/id_rsa`, `~/.ssh/id_ed25519`, etc.)
100 | - Tunnel lifecycle managed by `ConnectorManager`
101 | 
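The core of such a tunnel with the `ssh2` library looks roughly like this (a generic sketch of local port forwarding, not the actual `ssh-tunnel.ts` code; host names are placeholders):

```typescript
import { readFileSync } from "fs";
import { createServer } from "net";
import { Client } from "ssh2";

// Forward each local connection through the SSH server to the database.
const ssh = new Client();
ssh.on("ready", () => {
  const server = createServer((local) => {
    ssh.forwardOut(
      local.remoteAddress ?? "127.0.0.1",
      local.remotePort ?? 0,
      "db.internal", // database host as seen from the bastion
      5432,
      (err, stream) => {
        if (err) return void local.destroy();
        local.pipe(stream).pipe(local);
      }
    );
  });
  // Listen on an ephemeral local port; the DSN is then rewritten to
  // point at 127.0.0.1:<localPort>.
  server.listen(0, "127.0.0.1");
});
ssh.connect({
  host: "bastion.example.com",
  username: "ubuntu",
  privateKey: readFileSync(`${process.env.HOME}/.ssh/id_rsa`),
});
```
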
102 | ## Code Style
103 | 
104 | - TypeScript with strict mode enabled
105 | - ES modules with `.js` extension in imports
106 | - Group imports: Node.js core modules → third-party → local modules
107 | - Use camelCase for variables/functions, PascalCase for classes/types
108 | - Include explicit type annotations for function parameters/returns
109 | - Use try/finally blocks with DB connections (always release clients)
110 | - Prefer async/await over callbacks and Promise chains
111 | - Format error messages consistently
112 | - Use parameterized queries for DB operations
113 | - Validate inputs with zod schemas
114 | - Include fallbacks for environment variables
115 | - Use descriptive variable/function names
116 | - Keep functions focused and single-purpose
117 | 
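For instance, the try/finally rule looks like this with a `pg` pool (a generic sketch, not code from this repo):

```typescript
import { Pool } from "pg";

const pool = new Pool({ connectionString: process.env.DSN });

async function countEmployees(): Promise<number> {
  const client = await pool.connect();
  try {
    const result = await client.query("SELECT COUNT(*) AS n FROM employee");
    return Number(result.rows[0].n);
  } finally {
    // Always release the client, even when the query throws.
    client.release();
  }
}
```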
```

--------------------------------------------------------------------------------
/src/types/sql.ts:
--------------------------------------------------------------------------------

```typescript
1 | /**
2 |  * SQL dialect types supported by the application
3 |  */
4 | export type SQLDialect = "postgres" | "sqlite" | "mysql" | "mariadb" | "mssql" | "ansi";
5 | 
```

--------------------------------------------------------------------------------
/pnpm-workspace.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | packages:
 2 |   - '.'
 3 | 
 4 | approvedBuilds:
 5 |   - better-sqlite3
 6 | ignoredBuiltDependencies:
 7 |   - cpu-features
 8 |   - esbuild
 9 |   - protobufjs
10 |   - ssh2
11 | onlyBuiltDependencies:
12 |   - better-sqlite3
13 | 
```

--------------------------------------------------------------------------------
/resources/employee-sqlite/show_elapsed.sql:
--------------------------------------------------------------------------------

```sql
1 | -- SQLite doesn't have information_schema like MySQL
2 | -- This is a simpler version that just shows when the script was run
3 | 
4 | SELECT 'Database loaded at ' || datetime('now', 'localtime') AS completion_time;
```

--------------------------------------------------------------------------------
/resources/employee-sqlite/load_department.sql:
--------------------------------------------------------------------------------

```sql
 1 | INSERT INTO department VALUES 
 2 | ('d001','Marketing'),
 3 | ('d002','Finance'),
 4 | ('d003','Human Resources'),
 5 | ('d004','Production'),
 6 | ('d005','Development'),
 7 | ('d006','Quality Management'),
 8 | ('d007','Sales'),
 9 | ('d008','Research'),
10 | ('d009','Customer Service');
```

--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { defineConfig } from 'vitest/config';
 2 | 
 3 | export default defineConfig({
 4 |   test: {
 5 |     globals: true,
 6 |     environment: 'node',
 7 |     include: ['src/**/*.{test,spec}.ts'],
 8 |     coverage: {
 9 |       provider: 'v8',
10 |       reporter: ['text', 'lcov'],
11 |     },
12 |   },
13 | });
```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "compilerOptions": {
 3 |     "target": "ES2020",
 4 |     "module": "NodeNext",
 5 |     "moduleResolution": "NodeNext",
 6 |     "esModuleInterop": true,
 7 |     "outDir": "./dist",
 8 |     "strict": true,
 9 |     "lib": ["ES2020", "ES2021.Promise", "ES2022.Error"]
10 |   },
11 |   "include": ["src/**/*"]
12 | }
13 | 
```

--------------------------------------------------------------------------------
/scripts/setup-husky.sh:
--------------------------------------------------------------------------------

```bash
 1 | #!/bin/bash
 2 | 
 3 | # This script is used to set up Husky for development
 4 | # It should be run manually, not as part of production builds
 5 | 
 6 | echo "Setting up Husky for the project..."
 7 | npx husky init
 8 | 
 9 | # Create the pre-commit hook
10 | cat > .husky/pre-commit << 'EOL'
11 | #!/usr/bin/env sh
12 | 
13 | # Run lint-staged to check only the files that are being committed
14 | pnpm lint-staged
15 | 
16 | # Run the test suite to ensure everything passes
17 | pnpm test
18 | EOL
19 | 
20 | chmod +x .husky/pre-commit
21 | 
22 | echo "Husky setup complete!"
```

--------------------------------------------------------------------------------
/.github/copilot-instructions.md:
--------------------------------------------------------------------------------

```markdown
 1 | ## Database Access
 2 | 
 3 | This project provides an MCP server (DBHub) for secure SQL access to the development database.
 4 | 
 5 | AI agents can execute SQL queries. In read-only mode (recommended for production):
 6 | 
 7 | - `SELECT * FROM users LIMIT 5;`
 8 | - `SHOW TABLES;`
 9 | - `DESCRIBE table_name;`
10 | 
11 | In read-write mode (development/testing):
12 | 
13 | - `INSERT INTO users (name, email) VALUES ('John', 'john@example.com');`
14 | - `UPDATE users SET status = 'active' WHERE id = 1;`
15 | - `CREATE TABLE test_table (id INT PRIMARY KEY);`
16 | 
17 | Use `--readonly` flag to restrict to read-only operations for safety.
18 | 
```

--------------------------------------------------------------------------------
/src/utils/allowed-keywords.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { ConnectorType } from "../connectors/interface.js";
 2 | 
 3 | /**
 4 |  * List of allowed keywords for SQL queries
 5 |  * Not only SELECT queries are allowed;
 6 |  * other non-destructive queries are permitted as well
 7 |  */
 8 | export const allowedKeywords: Record<ConnectorType, string[]> = {
 9 |   postgres: ["select", "with", "explain", "analyze", "show"],
10 |   mysql: ["select", "with", "explain", "analyze", "show", "describe", "desc"],
11 |   mariadb: ["select", "with", "explain", "analyze", "show", "describe", "desc"],
12 |   sqlite: ["select", "with", "explain", "analyze", "pragma"],
13 |   sqlserver: ["select", "with", "explain", "showplan"],
14 | };
15 | 
```

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
 1 | #!/usr/bin/env node
 2 | 
 3 | // Import connector modules to register them
 4 | import "./connectors/postgres/index.js"; // Register PostgreSQL connector
 5 | import "./connectors/sqlserver/index.js"; // Register SQL Server connector
 6 | import "./connectors/sqlite/index.js"; // Register SQLite connector
 7 | import "./connectors/mysql/index.js"; // Register MySQL connector
 8 | import "./connectors/mariadb/index.js"; // Register MariaDB connector
 9 | 
10 | // Import main function from server.ts
11 | import { main } from "./server.js";
12 | 
13 | /**
14 |  * Entry point for the DBHub MCP Server
15 |  * Handles top-level exceptions and starts the server
16 |  */
17 | main().catch((error) => {
18 |   console.error("Fatal error:", error);
19 |   process.exit(1);
20 | });
21 | 
```

--------------------------------------------------------------------------------
/src/tools/index.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
 2 | import { executeSqlToolHandler, executeSqlSchema } from "./execute-sql.js";
 3 | /**
 4 |  * Register all tool handlers with the MCP server
 5 |  * @param server - The MCP server instance
 6 |  * @param id - Optional ID to suffix tool names (for Cursor multi-instance support)
 7 |  */
 8 | export function registerTools(server: McpServer, id?: string): void {
 9 |   // Build tool name with optional suffix
10 |   const toolName = id ? `execute_sql_${id}` : "execute_sql";
11 | 
12 |   // Tool to run a SQL query (read-only for safety)
13 |   server.tool(
14 |     toolName,
15 |     "Execute a SQL query on the current database",
16 |     executeSqlSchema,
17 |     executeSqlToolHandler
18 |   );
19 | 
20 | }
21 | 
```

--------------------------------------------------------------------------------
/resources/employee-sqlite/load_dept_manager.sql:
--------------------------------------------------------------------------------

```sql
 1 | INSERT INTO dept_manager VALUES 
 2 | (10002,'d001','1985-01-01','1991-10-01'),
 3 | (10039,'d001','1991-10-01','9999-01-01'),
 4 | (10085,'d002','1985-01-01','1989-12-17'),
 5 | (10114,'d002','1989-12-17','9999-01-01'),
 6 | (10183,'d003','1985-01-01','1992-03-21'),
 7 | (10228,'d003','1992-03-21','9999-01-01'),
 8 | (10303,'d004','1985-01-01','1988-09-09'),
 9 | (10344,'d004','1988-09-09','1992-08-02'),
10 | (10386,'d004','1992-08-02','1996-08-30'),
11 | (10420,'d004','1996-08-30','9999-01-01'),
12 | (10511,'d005','1985-01-01','1992-04-25'),
13 | (10567,'d005','1992-04-25','9999-01-01'),
14 | (10725,'d006','1985-01-01','1989-05-06'),
15 | (10765,'d006','1989-05-06','1991-09-12'),
16 | (10800,'d006','1991-09-12','1994-06-28'),
17 | (10854,'d006','1994-06-28','9999-01-01');
```

--------------------------------------------------------------------------------
/src/prompts/index.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
 2 | import { sqlGeneratorPromptHandler, sqlGeneratorSchema } from "./sql-generator.js";
 3 | import { dbExplainerPromptHandler, dbExplainerSchema } from "./db-explainer.js";
 4 | 
 5 | /**
 6 |  * Register all prompt handlers with the MCP server
 7 |  */
 8 | export function registerPrompts(server: McpServer): void {
 9 |   // Register SQL Generator prompt
10 |   server.prompt(
11 |     "generate_sql",
12 |     "Generate SQL queries from natural language descriptions",
13 |     sqlGeneratorSchema,
14 |     sqlGeneratorPromptHandler
15 |   );
16 | 
17 |   // Register Database Explainer prompt
18 |   server.prompt(
19 |     "explain_db",
20 |     "Get explanations about database tables, columns, and structures",
21 |     dbExplainerSchema,
22 |     dbExplainerPromptHandler
23 |   );
24 | }
25 | 
```

--------------------------------------------------------------------------------
/src/resources/schemas.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { ConnectorManager } from "../connectors/manager.js";
 2 | import {
 3 |   createResourceSuccessResponse,
 4 |   createResourceErrorResponse,
 5 | } from "../utils/response-formatter.js";
 6 | 
 7 | /**
 8 |  * Schemas resource handler
 9 |  * Returns a list of all schemas in the database
10 |  */
11 | export async function schemasResourceHandler(uri: URL, _extra: any) {
12 |   const connector = ConnectorManager.getCurrentConnector();
13 | 
14 |   try {
15 |     const schemas = await connector.getSchemas();
16 | 
17 |     // Prepare response data
18 |     const responseData = {
19 |       schemas: schemas,
20 |       count: schemas.length,
21 |     };
22 | 
23 |     // Use the utility to create a standardized response
24 |     return createResourceSuccessResponse(uri.href, responseData);
25 |   } catch (error) {
26 |     return createResourceErrorResponse(
27 |       uri.href,
28 |       `Error retrieving database schemas: ${(error as Error).message}`,
29 |       "SCHEMAS_RETRIEVAL_ERROR"
30 |     );
31 |   }
32 | }
33 | 
```

--------------------------------------------------------------------------------
/src/types/ssh.ts:
--------------------------------------------------------------------------------

```typescript
 1 | /**
 2 |  * SSH Tunnel Configuration Types
 3 |  */
 4 | 
 5 | export interface SSHTunnelConfig {
 6 |   /** SSH server hostname */
 7 |   host: string;
 8 |   
 9 |   /** SSH server port (default: 22) */
10 |   port?: number;
11 |   
12 |   /** SSH username */
13 |   username: string;
14 |   
15 |   /** SSH password (for password authentication) */
16 |   password?: string;
17 |   
18 |   /** Path to SSH private key file */
19 |   privateKey?: string;
20 |   
21 |   /** Passphrase for SSH private key */
22 |   passphrase?: string;
23 | }
24 | 
25 | export interface SSHTunnelOptions {
26 |   /** Target database host (as seen from SSH server) */
27 |   targetHost: string;
28 |   
29 |   /** Target database port */
30 |   targetPort: number;
31 |   
32 |   /** Local port to bind the tunnel (0 for dynamic allocation) */
33 |   localPort?: number;
34 | }
35 | 
36 | export interface SSHTunnelInfo {
37 |   /** Local port where the tunnel is listening */
38 |   localPort: number;
39 |   
40 |   /** Original target host */
41 |   targetHost: string;
42 |   
43 |   /** Original target port */
44 |   targetPort: number;
45 | }
```
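
As an example, a key-based tunnel with a dynamically allocated local port can be described like this (all values are placeholders):

```typescript
import type { SSHTunnelConfig, SSHTunnelOptions } from "./ssh.js";

// Placeholder values for illustration.
const config: SSHTunnelConfig = {
  host: "bastion.example.com",
  port: 22,
  username: "deploy",
  privateKey: "/home/deploy/.ssh/id_ed25519",
};

const options: SSHTunnelOptions = {
  targetHost: "db.internal",
  targetPort: 5432,
  localPort: 0, // 0 asks for dynamic allocation
};
```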

--------------------------------------------------------------------------------
/tsup.config.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { defineConfig } from 'tsup';
 2 | import fs from 'fs';
 3 | import path from 'path';
 4 | 
 5 | export default defineConfig({
 6 |   entry: ['src/index.ts'],
 7 |   format: ['esm'],
 8 |   dts: true,
 9 |   clean: true,
10 |   outDir: 'dist',
11 |   // Copy the employee-sqlite resources to dist
12 |   async onSuccess() {
13 |     // Create target directory
14 |     const targetDir = path.join('dist', 'resources', 'employee-sqlite');
15 |     fs.mkdirSync(targetDir, { recursive: true });
16 | 
17 |     // Copy all SQL files from resources/employee-sqlite to dist/resources/employee-sqlite
18 |     const sourceDir = path.join('resources', 'employee-sqlite');
19 |     const files = fs.readdirSync(sourceDir);
20 | 
21 |     for (const file of files) {
22 |       if (file.endsWith('.sql')) {
23 |         const sourcePath = path.join(sourceDir, file);
24 |         const targetPath = path.join(targetDir, file);
25 |         fs.copyFileSync(sourcePath, targetPath);
26 |         console.log(`Copied ${sourcePath} to ${targetPath}`);
27 |       }
28 |     }
29 |   },
30 | });
31 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | FROM node:22-alpine AS builder
 2 | 
 3 | WORKDIR /app
 4 | 
 5 | # Copy package.json and pnpm-lock.yaml
 6 | COPY package.json pnpm-lock.yaml ./
 7 | 
 8 | # Install pnpm
 9 | RUN corepack enable && corepack prepare pnpm@latest --activate
10 | 
11 | # Install dependencies
12 | RUN pnpm install
13 | 
14 | # Copy source code
15 | COPY . .
16 | 
17 | # Build the application
18 | RUN pnpm run build
19 | 
20 | # Production stage
21 | FROM node:22-alpine
22 | 
23 | WORKDIR /app
24 | 
25 | # Copy only production files
26 | COPY --from=builder /app/package.json /app/pnpm-lock.yaml ./
27 | 
28 | # Install pnpm
29 | RUN corepack enable && corepack prepare pnpm@latest --activate
30 | 
31 | RUN pnpm pkg set pnpm.onlyBuiltDependencies[0]=better-sqlite3
32 | RUN pnpm add better-sqlite3
33 | RUN node -e 'new require("better-sqlite3")(":memory:")'
34 | 
35 | # Install production dependencies only
36 | RUN pnpm install --prod
37 | 
38 | # Copy built application from builder stage
39 | COPY --from=builder /app/dist ./dist
40 | 
41 | # Expose ports
42 | EXPOSE 8080
43 | 
44 | # Set environment variables
45 | ENV NODE_ENV=production
46 | 
47 | # Run the server
48 | ENTRYPOINT ["node", "dist/index.js"]
49 | 
```

--------------------------------------------------------------------------------
/.github/workflows/run-tests.yml:
--------------------------------------------------------------------------------

```yaml
 1 | name: Run Tests
 2 | 
 3 | on:
 4 |   pull_request:
 5 |     branches: [ main ]
 6 |     # Run when PR is opened, synchronized, or reopened
 7 |     types: [opened, synchronize, reopened]
 8 |   # Also allow manual triggering
 9 |   workflow_dispatch:
10 | 
11 | jobs:
12 |   test:
13 |     name: Run Test Suite
14 |     runs-on: ubuntu-latest
15 |     
16 |     steps:
17 |       - name: Checkout code
18 |         uses: actions/checkout@v3
19 |       
20 |       - name: Install pnpm
21 |         uses: pnpm/action-setup@v2
22 |         with:
23 |           version: 8
24 |           run_install: false
25 | 
26 |       - name: Setup Node.js
27 |         uses: actions/setup-node@v3
28 |         with:
29 |           node-version: '20'
30 |           cache: 'pnpm'
31 |       
32 |       - name: Get pnpm store directory
33 |         id: pnpm-cache
34 |         shell: bash
35 |         run: |
36 |           echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
37 |       
38 |       - name: Setup pnpm cache
39 |         uses: actions/cache@v3
40 |         with:
41 |           path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
42 |           key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
43 |           restore-keys: |
44 |             ${{ runner.os }}-pnpm-store-
45 |       
46 |       - name: Install dependencies
47 |         run: pnpm install
48 |       
49 |       - name: Run tests
50 |         run: pnpm test
```

--------------------------------------------------------------------------------
/src/resources/tables.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { ConnectorManager } from "../connectors/manager.js";
 2 | import {
 3 |   createResourceSuccessResponse,
 4 |   createResourceErrorResponse,
 5 | } from "../utils/response-formatter.js";
 6 | 
 7 | /**
 8 |  * Tables resource handler
 9 |  * Returns a list of all tables in the database or within a specific schema
10 |  */
11 | export async function tablesResourceHandler(uri: URL, variables: any, _extra: any) {
12 |   const connector = ConnectorManager.getCurrentConnector();
13 | 
14 |   // Extract the schema name from URL variables if present
15 |   const schemaName =
16 |     variables && variables.schemaName
17 |       ? Array.isArray(variables.schemaName)
18 |         ? variables.schemaName[0]
19 |         : variables.schemaName
20 |       : undefined;
21 | 
22 |   try {
23 |     // If a schema name was provided, verify that it exists
24 |     if (schemaName) {
25 |       const availableSchemas = await connector.getSchemas();
26 |       if (!availableSchemas.includes(schemaName)) {
27 |         return createResourceErrorResponse(
28 |           uri.href,
29 |           `Schema '${schemaName}' does not exist or cannot be accessed`,
30 |           "SCHEMA_NOT_FOUND"
31 |         );
32 |       }
33 |     }
34 | 
35 |     // Get tables with optional schema filter
36 |     const tableNames = await connector.getTables(schemaName);
37 | 
38 |     // Prepare response data
39 |     const responseData = {
40 |       tables: tableNames,
41 |       count: tableNames.length,
42 |       schema: schemaName,
43 |     };
44 | 
45 |     // Use the utility to create a standardized response
46 |     return createResourceSuccessResponse(uri.href, responseData);
47 |   } catch (error) {
48 |     return createResourceErrorResponse(
49 |       uri.href,
50 |       `Error retrieving tables: ${(error as Error).message}`,
51 |       "TABLES_RETRIEVAL_ERROR"
52 |     );
53 |   }
54 | }
55 | 
```

--------------------------------------------------------------------------------
/resources/employee-sqlite/object.sql:
--------------------------------------------------------------------------------

```sql
 1 | -- SQLite implementation of views and functions
 2 | -- This is simplified compared to the MySQL version
 3 | 
 4 | -- Drop views if they exist
 5 | DROP VIEW IF EXISTS v_full_employee;
 6 | DROP VIEW IF EXISTS v_full_department;
 7 | DROP VIEW IF EXISTS emp_dept_current; DROP VIEW IF EXISTS current_managers;
 8 | 
 9 | -- Create helper view to get current department for employees
10 | CREATE VIEW emp_dept_current AS
11 | SELECT 
12 |     e.emp_no,
13 |     de.dept_no
14 | FROM 
15 |     employee e
16 | JOIN 
17 |     dept_emp de ON e.emp_no = de.emp_no
18 | JOIN (
19 |     SELECT 
20 |         emp_no, 
21 |         MAX(from_date) AS max_from_date
22 |     FROM 
23 |         dept_emp
24 |     GROUP BY 
25 |         emp_no
26 | ) latest ON de.emp_no = latest.emp_no AND de.from_date = latest.max_from_date;
27 | 
28 | -- View that shows employee with their current department name
29 | CREATE VIEW v_full_employee AS
30 | SELECT
31 |     e.emp_no,
32 |     e.first_name, 
33 |     e.last_name,
34 |     e.birth_date, 
35 |     e.gender,
36 |     e.hire_date,
37 |     d.dept_name AS department
38 | FROM
39 |     employee e
40 | LEFT JOIN
41 |     emp_dept_current edc ON e.emp_no = edc.emp_no
42 | LEFT JOIN
43 |     department d ON edc.dept_no = d.dept_no;
44 | 
45 | -- View to get current managers for departments
46 | CREATE VIEW current_managers AS
47 | SELECT
48 |     d.dept_no,
49 |     d.dept_name,
50 |     e.first_name || ' ' || e.last_name AS manager
51 | FROM
52 |     department d
53 | LEFT JOIN
54 |     dept_manager dm ON d.dept_no = dm.dept_no
55 | JOIN (
56 |     SELECT
57 |         dept_no,
58 |         MAX(from_date) AS max_from_date
59 |     FROM
60 |         dept_manager
61 |     GROUP BY
62 |         dept_no
63 | ) latest ON dm.dept_no = latest.dept_no AND dm.from_date = latest.max_from_date
64 | LEFT JOIN
65 |     employee e ON dm.emp_no = e.emp_no;
66 | 
67 | -- Create a view showing departments with their managers
68 | CREATE VIEW v_full_department AS
69 | SELECT
70 |     dept_no,
71 |     dept_name,
72 |     manager
73 | FROM
74 |     current_managers;
```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "name": "dbhub",
 3 |   "version": "0.11.5",
 4 |   "description": "Universal Database MCP Server",
 5 |   "repository": {
 6 |     "type": "git",
 7 |     "url": "https://github.com/bytebase/dbhub.git"
 8 |   },
 9 |   "main": "dist/index.js",
10 |   "type": "module",
11 |   "bin": {
12 |     "dbhub": "dist/index.js"
13 |   },
14 |   "files": [
15 |     "dist",
16 |     "LICENSE",
17 |     "README.md"
18 |   ],
19 |   "scripts": {
20 |     "build": "tsup",
21 |     "start": "node dist/index.js",
22 |     "dev": "NODE_ENV=development tsx src/index.ts",
23 |     "crossdev": "cross-env NODE_ENV=development tsx src/index.ts",
24 |     "test": "vitest run",
25 |     "test:watch": "vitest",
26 |     "test:integration": "vitest run --testNamePattern='Integration Tests'",
27 |     "prepare": "[[ \"$NODE_ENV\" != \"production\" ]] && husky || echo \"Skipping husky in production\"",
28 |     "pre-commit": "lint-staged"
29 |   },
30 |   "keywords": [],
31 |   "author": "",
32 |   "license": "MIT",
33 |   "dependencies": {
34 |     "@azure/identity": "^4.8.0",
35 |     "@modelcontextprotocol/sdk": "^1.12.1",
36 |     "better-sqlite3": "^11.9.0",
37 |     "dotenv": "^16.4.7",
38 |     "express": "^4.18.2",
39 |     "mariadb": "^3.4.0",
40 |     "mssql": "^11.0.1",
41 |     "mysql2": "^3.13.0",
42 |     "pg": "^8.13.3",
43 |     "ssh-config": "^5.0.3",
44 |     "ssh2": "^1.16.0",
45 |     "zod": "^3.24.2"
46 |   },
47 |   "devDependencies": {
48 |     "@testcontainers/mariadb": "^11.0.3",
49 |     "@testcontainers/mssqlserver": "^11.0.3",
50 |     "@testcontainers/mysql": "^11.0.3",
51 |     "@testcontainers/postgresql": "^11.0.3",
52 |     "@types/better-sqlite3": "^7.6.12",
53 |     "@types/express": "^4.17.21",
54 |     "@types/mssql": "^9.1.7",
55 |     "@types/node": "^22.13.10",
56 |     "@types/pg": "^8.11.11",
57 |     "@types/ssh2": "^1.15.5",
58 |     "cross-env": "^7.0.3",
59 |     "husky": "^9.0.11",
60 |     "lint-staged": "^15.2.2",
61 |     "prettier": "^3.5.3",
62 |     "testcontainers": "^11.0.3",
63 |     "ts-node": "^10.9.2",
64 |     "tsup": "^8.4.0",
65 |     "tsx": "^4.19.3",
66 |     "typescript": "^5.8.2",
67 |     "vitest": "^1.6.1"
68 |   },
69 |   "compilerOptions": {
70 |     "target": "ES2020",
71 |     "module": "NodeNext",
72 |     "moduleResolution": "NodeNext",
73 |     "esModuleInterop": true,
74 |     "strict": true,
75 |     "outDir": "dist",
76 |     "rootDir": "src"
77 |   },
78 |   "include": [
79 |     "src/**/*"
80 |   ],
81 |   "lint-staged": {
82 |     "*.{js,ts}": "vitest related --run"
83 |   }
84 | }
85 | 
```

--------------------------------------------------------------------------------
/src/resources/index.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { McpServer, ResourceTemplate } from "@modelcontextprotocol/sdk/server/mcp.js";
 2 | import { tablesResourceHandler } from "./tables.js";
 3 | import { tableStructureResourceHandler } from "./schema.js";
 4 | import { schemasResourceHandler } from "./schemas.js";
 5 | import { indexesResourceHandler } from "./indexes.js";
 6 | import { proceduresResourceHandler, procedureDetailResourceHandler } from "./procedures.js";
 7 | 
 8 | // Export all resource handlers
 9 | export { tablesResourceHandler } from "./tables.js";
10 | export { tableStructureResourceHandler } from "./schema.js";
11 | export { schemasResourceHandler } from "./schemas.js";
12 | export { indexesResourceHandler } from "./indexes.js";
13 | export { proceduresResourceHandler, procedureDetailResourceHandler } from "./procedures.js";
14 | 
15 | /**
16 |  * Register all resource handlers with the MCP server
17 |  */
18 | export function registerResources(server: McpServer): void {
19 |   // Resource for listing all schemas
20 |   server.resource("schemas", "db://schemas", schemasResourceHandler);
21 | 
22 |   // Allow listing tables within a specific schema
23 |   server.resource(
24 |     "tables_in_schema",
25 |     new ResourceTemplate("db://schemas/{schemaName}/tables", { list: undefined }),
26 |     tablesResourceHandler
27 |   );
28 | 
29 |   // Resource for getting table structure within a specific database schema
30 |   server.resource(
31 |     "table_structure_in_schema",
32 |     new ResourceTemplate("db://schemas/{schemaName}/tables/{tableName}", { list: undefined }),
33 |     tableStructureResourceHandler
34 |   );
35 | 
36 |   // Resource for getting indexes for a table within a specific database schema
37 |   server.resource(
38 |     "indexes_in_table",
39 |     new ResourceTemplate("db://schemas/{schemaName}/tables/{tableName}/indexes", {
40 |       list: undefined,
41 |     }),
42 |     indexesResourceHandler
43 |   );
44 | 
45 |   // Resource for listing stored procedures within a schema
46 |   server.resource(
47 |     "procedures_in_schema",
48 |     new ResourceTemplate("db://schemas/{schemaName}/procedures", { list: undefined }),
49 |     proceduresResourceHandler
50 |   );
51 | 
52 |   // Resource for getting procedure detail within a schema
53 |   server.resource(
54 |     "procedure_detail_in_schema",
55 |     new ResourceTemplate("db://schemas/{schemaName}/procedures/{procedureName}", {
56 |       list: undefined,
57 |     }),
58 |     procedureDetailResourceHandler
59 |   );
60 | }
61 | 
```
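
Taken together, these registrations expose the following URI surface. Example expansions, with placeholder schema, table, and procedure names:

```typescript
// Example template expansions; "public", "employee", and "refresh_stats" are placeholders.
const exampleResourceUris = [
  "db://schemas",
  "db://schemas/public/tables",
  "db://schemas/public/tables/employee",
  "db://schemas/public/tables/employee/indexes",
  "db://schemas/public/procedures",
  "db://schemas/public/procedures/refresh_stats",
];
```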

--------------------------------------------------------------------------------
/src/resources/indexes.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { ConnectorManager } from "../connectors/manager.js";
 2 | import {
 3 |   createResourceSuccessResponse,
 4 |   createResourceErrorResponse,
 5 | } from "../utils/response-formatter.js";
 6 | 
 7 | /**
 8 |  * Indexes resource handler
 9 |  * Returns information about indexes on a table
10 |  */
11 | export async function indexesResourceHandler(uri: URL, variables: any, _extra: any) {
12 |   const connector = ConnectorManager.getCurrentConnector();
13 | 
14 |   // Extract schema and table names from URL variables
15 |   const schemaName =
16 |     variables && variables.schemaName
17 |       ? Array.isArray(variables.schemaName)
18 |         ? variables.schemaName[0]
19 |         : variables.schemaName
20 |       : undefined;
21 | 
22 |   const tableName =
23 |     variables && variables.tableName
24 |       ? Array.isArray(variables.tableName)
25 |         ? variables.tableName[0]
26 |         : variables.tableName
27 |       : undefined;
28 | 
29 |   if (!tableName) {
30 |     return createResourceErrorResponse(uri.href, "Table name is required", "MISSING_TABLE_NAME");
31 |   }
32 | 
33 |   try {
34 |     // If a schema name was provided, verify that it exists
35 |     if (schemaName) {
36 |       const availableSchemas = await connector.getSchemas();
37 |       if (!availableSchemas.includes(schemaName)) {
38 |         return createResourceErrorResponse(
39 |           uri.href,
40 |           `Schema '${schemaName}' does not exist or cannot be accessed`,
41 |           "SCHEMA_NOT_FOUND"
42 |         );
43 |       }
44 |     }
45 | 
46 |     // Check if table exists
47 |     const tableExists = await connector.tableExists(tableName, schemaName);
48 |     if (!tableExists) {
49 |       return createResourceErrorResponse(
50 |         uri.href,
51 |         `Table '${tableName}' does not exist in schema '${schemaName || "default"}'`,
52 |         "TABLE_NOT_FOUND"
53 |       );
54 |     }
55 | 
56 |     // Get indexes for the table
57 |     const indexes = await connector.getTableIndexes(tableName, schemaName);
58 | 
59 |     // Prepare response data
60 |     const responseData = {
61 |       table: tableName,
62 |       schema: schemaName,
63 |       indexes: indexes,
64 |       count: indexes.length,
65 |     };
66 | 
67 |     // Use the utility to create a standardized response
68 |     return createResourceSuccessResponse(uri.href, responseData);
69 |   } catch (error) {
70 |     return createResourceErrorResponse(
71 |       uri.href,
72 |       `Error retrieving indexes: ${(error as Error).message}`,
73 |       "INDEXES_RETRIEVAL_ERROR"
74 |     );
75 |   }
76 | }
77 | 
```

--------------------------------------------------------------------------------
/src/config/demo-loader.ts:
--------------------------------------------------------------------------------

```typescript
 1 | /**
 2 |  * Demo data loader for SQLite in-memory database
 3 |  *
 4 |  * This module loads the sample employee database into the SQLite in-memory database
 5 |  * when the --demo flag is specified.
 6 |  */
 7 | import fs from "fs";
 8 | import path from "path";
 9 | import { fileURLToPath } from "url";
10 | 
11 | // Create __dirname equivalent for ES modules
12 | const __filename = fileURLToPath(import.meta.url);
13 | const __dirname = path.dirname(__filename);
14 | 
15 | // Path to sample data files - will be bundled with the package
16 | // Try different paths to find the SQL files in development or production
17 | let DEMO_DATA_DIR: string;
18 | const projectRootPath = path.join(__dirname, "..", "..", "..");
19 | const projectResourcesPath = path.join(projectRootPath, "resources", "employee-sqlite");
20 | const distPath = path.join(__dirname, "resources", "employee-sqlite");
21 | 
22 | // First try the project root resources directory (for development)
23 | if (fs.existsSync(projectResourcesPath)) {
24 |   DEMO_DATA_DIR = projectResourcesPath;
25 | }
26 | // Then try dist directory (for production)
27 | else if (fs.existsSync(distPath)) {
28 |   DEMO_DATA_DIR = distPath;
29 | }
30 | // Fallback to a relative path from the current directory
31 | else {
32 |   DEMO_DATA_DIR = path.join(process.cwd(), "resources", "employee-sqlite");
33 |   if (!fs.existsSync(DEMO_DATA_DIR)) {
34 |     throw new Error(`Could not find employee-sqlite resources in any of the expected locations: 
35 |       - ${projectResourcesPath}
36 |       - ${distPath}
37 |       - ${DEMO_DATA_DIR}`);
38 |   }
39 | }
40 | 
41 | /**
42 |  * Load SQL file contents
43 |  */
44 | export function loadSqlFile(fileName: string): string {
45 |   const filePath = path.join(DEMO_DATA_DIR, fileName);
46 |   return fs.readFileSync(filePath, "utf8");
47 | }
48 | 
49 | /**
50 |  * Get SQLite DSN for in-memory database
51 |  */
52 | export function getInMemorySqliteDSN(): string {
53 |   return "sqlite:///:memory:";
54 | }
55 | 
56 | /**
57 |  * Load SQL files sequentially
58 |  */
59 | export function getSqliteInMemorySetupSql(): string {
60 |   // First, load the schema
61 |   let sql = loadSqlFile("employee.sql");
62 | 
63 |   // Replace .read directives with the actual file contents
64 |   // This is necessary because in-memory SQLite can't use .read
65 |   const readRegex = /\.read\s+([a-zA-Z0-9_]+\.sql)/g;
66 |   let match;
67 | 
68 |   while ((match = readRegex.exec(sql)) !== null) {
69 |     const includePath = match[1];
70 |     const includeContent = loadSqlFile(includePath);
71 | 
72 |     // Replace the .read line with the file contents
73 |     sql = sql.replace(match[0], includeContent);
74 |   }
75 | 
76 |   return sql;
77 | }
78 | 
```
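
A minimal sketch of how the loader output is consumed, assuming `better-sqlite3` (already a dependency) and an in-memory database:

```typescript
import Database from "better-sqlite3";
import { getSqliteInMemorySetupSql } from "./demo-loader.js";

// Build the demo database in memory, then run a sanity query.
const db = new Database(":memory:");
db.exec(getSqliteInMemorySetupSql());

const row = db.prepare("SELECT COUNT(*) AS n FROM employee").get() as { n: number };
console.log(`employees loaded: ${row.n}`);
```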

--------------------------------------------------------------------------------
/src/utils/__tests__/ssh-tunnel.test.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { describe, it, expect } from 'vitest';
 2 | import { SSHTunnel } from '../ssh-tunnel.js';
 3 | import type { SSHTunnelConfig } from '../../types/ssh.js';
 4 | 
 5 | describe('SSHTunnel', () => {
 6 |   describe('Initial State', () => {
 7 |     it('should have initial state as disconnected', () => {
 8 |       const tunnel = new SSHTunnel();
 9 |       expect(tunnel.getIsConnected()).toBe(false);
10 |       expect(tunnel.getTunnelInfo()).toBeNull();
11 |     });
12 |   });
13 | 
14 |   describe('Tunnel State Management', () => {
15 |     it('should prevent establishing multiple tunnels', async () => {
16 |       const tunnel = new SSHTunnel();
17 |       
18 |       // Set tunnel as connected (simulating a connected state)
19 |       (tunnel as any).isConnected = true;
20 | 
21 |       const config: SSHTunnelConfig = {
22 |         host: 'ssh.example.com',
23 |         username: 'testuser',
24 |         password: 'testpass',
25 |       };
26 | 
27 |       const options = {
28 |         targetHost: 'database.local',
29 |         targetPort: 5432,
30 |       };
31 | 
32 |       await expect(tunnel.establish(config, options)).rejects.toThrow(
33 |         'SSH tunnel is already established'
34 |       );
35 |     });
36 | 
37 |     it('should handle close when not connected', async () => {
38 |       const tunnel = new SSHTunnel();
39 |       
40 |       // Should not throw when closing disconnected tunnel
41 |       await expect(tunnel.close()).resolves.toBeUndefined();
42 |     });
43 |   });
44 | 
45 |   describe('Configuration Validation', () => {
46 |     it('should validate authentication requirements', () => {
47 |       // Test that config validation logic exists
48 |       const validConfigWithPassword: SSHTunnelConfig = {
49 |         host: 'ssh.example.com',
50 |         username: 'testuser',
51 |         password: 'testpass',
52 |       };
53 | 
54 |       const validConfigWithKey: SSHTunnelConfig = {
55 |         host: 'ssh.example.com',
56 |         username: 'testuser',
57 |         privateKey: '/path/to/key',
58 |       };
59 | 
60 |       const validConfigWithKeyAndPassphrase: SSHTunnelConfig = {
61 |         host: 'ssh.example.com',
62 |         port: 2222,
63 |         username: 'testuser',
64 |         privateKey: '/path/to/key',
65 |         passphrase: 'keypassphrase',
66 |       };
67 | 
68 |       // These should be valid configurations
69 |       expect(validConfigWithPassword.host).toBe('ssh.example.com');
70 |       expect(validConfigWithPassword.username).toBe('testuser');
71 |       expect(validConfigWithPassword.password).toBe('testpass');
72 | 
73 |       expect(validConfigWithKey.privateKey).toBe('/path/to/key');
74 |       expect(validConfigWithKeyAndPassphrase.passphrase).toBe('keypassphrase');
75 |       expect(validConfigWithKeyAndPassphrase.port).toBe(2222);
76 |     });
77 |   });
78 | });
```

--------------------------------------------------------------------------------
/src/utils/dsn-obfuscate.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import type { SSHTunnelConfig } from '../types/ssh.js';
 2 | 
 3 | /**
 4 |  * Obfuscates the password in a DSN string for logging purposes
 5 |  * @param dsn The original DSN string
 6 |  * @returns DSN string with password replaced by asterisks
 7 |  */
 8 | export function obfuscateDSNPassword(dsn: string): string {
 9 |   if (!dsn) {
10 |     return dsn;
11 |   }
12 | 
13 |   try {
14 |     // Handle different DSN formats
15 |     const protocolMatch = dsn.match(/^([^:]+):/);
16 |     if (!protocolMatch) {
17 |       return dsn; // Not a recognizable DSN format
18 |     }
19 | 
20 |     const protocol = protocolMatch[1];
21 | 
22 |     // For SQLite file paths, don't obfuscate
23 |     if (protocol === 'sqlite') {
24 |       return dsn;
25 |     }
26 | 
27 |     // For other databases, look for password pattern: ://user:password@host
28 |     // We need to be careful with @ in passwords, so we'll find the last @ that separates password from host
29 |     const protocolPart = dsn.split('://')[1];
30 |     if (!protocolPart) {
31 |       return dsn;
32 |     }
33 |     
34 |     // Find the last @ to separate credentials from host
35 |     const lastAtIndex = protocolPart.lastIndexOf('@');
36 |     if (lastAtIndex === -1) {
37 |       return dsn; // No @ found, no password to obfuscate
38 |     }
39 |     
40 |     const credentialsPart = protocolPart.substring(0, lastAtIndex);
41 |     const hostPart = protocolPart.substring(lastAtIndex + 1);
42 |     
43 |     // Check if there's a colon in credentials (user:password format)
44 |     const colonIndex = credentialsPart.indexOf(':');
45 |     if (colonIndex === -1) {
46 |       return dsn; // No colon found, no password to obfuscate
47 |     }
48 |     
49 |     const username = credentialsPart.substring(0, colonIndex);
50 |     const password = credentialsPart.substring(colonIndex + 1);
51 |     const obfuscatedPassword = '*'.repeat(Math.min(password.length, 8));
52 |     
53 |     return `${protocol}://${username}:${obfuscatedPassword}@${hostPart}`;
54 |   } catch (error) {
55 |     // If any error occurs during obfuscation, return the original DSN
56 |     // This ensures we don't break functionality due to obfuscation issues
57 |     return dsn;
58 |   }
59 | }
60 | 
61 | /**
62 |  * Obfuscates sensitive information in SSH configuration for logging
63 |  * @param config The SSH tunnel configuration
64 |  * @returns SSH config with sensitive data replaced by asterisks
65 |  */
66 | export function obfuscateSSHConfig(config: SSHTunnelConfig): Partial<SSHTunnelConfig> {
67 |   const obfuscated: Partial<SSHTunnelConfig> = {
68 |     host: config.host,
69 |     port: config.port,
70 |     username: config.username,
71 |   };
72 |   
73 |   if (config.password) {
74 |     obfuscated.password = '*'.repeat(8);
75 |   }
76 |   
77 |   if (config.privateKey) {
78 |     obfuscated.privateKey = config.privateKey; // Keep path as-is
79 |   }
80 |   
81 |   if (config.passphrase) {
82 |     obfuscated.passphrase = '*'.repeat(8);
83 |   }
84 |   
85 |   return obfuscated;
86 | }
```
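
For example, the following inputs and outputs follow directly from the logic above:

```typescript
import { obfuscateDSNPassword } from "./dsn-obfuscate.js";

obfuscateDSNPassword("postgres://app:s3cret@db.example.com:5432/prod");
// => "postgres://app:******@db.example.com:5432/prod" (6-char password, 6 asterisks, capped at 8)

obfuscateDSNPassword("sqlite:///var/data/app.db");
// => unchanged: SQLite DSNs carry no credentials

obfuscateDSNPassword("postgres://db.example.com:5432/prod");
// => unchanged: no '@', so nothing to obfuscate
```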

--------------------------------------------------------------------------------
/src/resources/schema.ts:
--------------------------------------------------------------------------------

```typescript
 1 | import { ConnectorManager } from "../connectors/manager.js";
 2 | import { Variables } from "@modelcontextprotocol/sdk/shared/uriTemplate.js";
 3 | import {
 4 |   createResourceSuccessResponse,
 5 |   createResourceErrorResponse,
 6 | } from "../utils/response-formatter.js";
 7 | 
 8 | /**
 9 |  * Schema resource handler
10 |  * Returns schema information for a specific table, optionally within a specific database schema
11 |  */
12 | export async function tableStructureResourceHandler(uri: URL, variables: Variables, _extra: any) {
13 |   const connector = ConnectorManager.getCurrentConnector();
14 | 
15 |   // Handle tableName which could be a string or string array from URL template
16 |   const tableName = Array.isArray(variables.tableName)
17 |     ? variables.tableName[0]
18 |     : (variables.tableName as string);
19 | 
20 |   // Extract schemaName if present
21 |   const schemaName = variables.schemaName
22 |     ? Array.isArray(variables.schemaName)
23 |       ? variables.schemaName[0]
24 |       : (variables.schemaName as string)
25 |     : undefined;
26 | 
27 |   try {
28 |     // If a schema name was provided, verify that it exists
29 |     if (schemaName) {
30 |       const availableSchemas = await connector.getSchemas();
31 |       if (!availableSchemas.includes(schemaName)) {
32 |         return createResourceErrorResponse(
33 |           uri.href,
34 |           `Schema '${schemaName}' does not exist or cannot be accessed`,
35 |           "SCHEMA_NOT_FOUND"
36 |         );
37 |       }
38 |     }
39 | 
40 |     // Check if the table exists in the schema before getting its structure
41 |     const tableExists = await connector.tableExists(tableName, schemaName);
42 |     if (!tableExists) {
43 |       const schemaInfo = schemaName ? ` in schema '${schemaName}'` : "";
44 |       return createResourceErrorResponse(
45 |         uri.href,
46 |         `Table '${tableName}'${schemaInfo} does not exist or cannot be accessed`,
47 |         "TABLE_NOT_FOUND"
48 |       );
49 |     }
50 | 
51 |     // Get the table schema now that we know it exists
52 |     const columns = await connector.getTableSchema(tableName, schemaName);
53 | 
54 |     // Create a more structured response
55 |     const formattedColumns = columns.map((col) => ({
56 |       name: col.column_name,
57 |       type: col.data_type,
58 |       nullable: col.is_nullable === "YES",
59 |       default: col.column_default,
60 |     }));
61 | 
62 |     // Prepare response data
63 |     const responseData = {
64 |       table: tableName,
65 |       schema: schemaName,
66 |       columns: formattedColumns,
67 |       count: formattedColumns.length,
68 |     };
69 | 
70 |     // Use the utility to create a standardized response
71 |     return createResourceSuccessResponse(uri.href, responseData);
72 |   } catch (error) {
73 |     // Handle any other errors that might occur
74 |     return createResourceErrorResponse(
75 |       uri.href,
76 |       `Error retrieving schema: ${(error as Error).message}`,
77 |       "SCHEMA_RETRIEVAL_ERROR"
78 |     );
79 |   }
80 | }
81 | 
```

--------------------------------------------------------------------------------
/.github/workflows/docker-publish.yml:
--------------------------------------------------------------------------------

```yaml
 1 | # Workflow to build and publish Docker images for DBHub
 2 | #
 3 | # This workflow:
 4 | # 1. Always pushes to the 'latest' tag when changes are pushed to the main branch
 5 | # 2. If package.json version changes, also pushes a version-specific tag
 6 | # 3. Builds for both amd64 and arm64 architectures
 7 | 
 8 | name: Publish to docker hub
 9 | 
10 | on:
11 |   push:
12 |     branches: [main]
13 | 
14 | env:
15 |   IMAGE_NAME: bytebase/dbhub
16 | 
17 | jobs:
18 |   build-and-push:
19 |     runs-on: ubuntu-latest
20 | 
21 |     steps:
22 |       - name: Checkout repository
23 |         uses: actions/checkout@v4
24 |         with:
25 |           fetch-depth: 2 # Fetch two commits to detect changes in package.json
26 | 
27 |       - name: Check for package.json version changes
28 |         id: check-version
29 |         run: |
30 |           # Get current and previous package.json content
31 |           git show HEAD:package.json > package.json.current
32 |           git show HEAD~1:package.json > package.json.previous 2>/dev/null || cp package.json.current package.json.previous
33 | 
34 |           # Extract versions
35 |           CURRENT_VERSION=$(jq -r '.version' package.json.current)
36 |           PREVIOUS_VERSION=$(jq -r '.version' package.json.previous)
37 | 
38 |           echo "Current version: $CURRENT_VERSION"
39 |           echo "Previous version: $PREVIOUS_VERSION"
40 | 
41 |           # Set output based on whether version changed
42 |           if [ "$CURRENT_VERSION" != "$PREVIOUS_VERSION" ]; then
43 |             echo "Version changed from $PREVIOUS_VERSION to $CURRENT_VERSION"
44 |             echo "VERSION_CHANGED=true" >> $GITHUB_OUTPUT
45 |             echo "VERSION=$CURRENT_VERSION" >> $GITHUB_OUTPUT
46 |           else
47 |             echo "Version unchanged: $CURRENT_VERSION"
48 |             echo "VERSION_CHANGED=false" >> $GITHUB_OUTPUT
49 |           fi
50 | 
51 |       - name: Set up Docker Buildx
52 |         uses: docker/setup-buildx-action@v3
53 | 
54 |       - name: Log in to Docker Hub
55 |         uses: docker/login-action@v3
56 |         with:
57 |           username: ${{ secrets.DOCKERHUB_USERNAME }}
58 |           password: ${{ secrets.DOCKERHUB_TOKEN }}
59 | 
60 |       - name: Prepare Docker tags
61 |         id: prep
62 |         run: |
63 |           # Always include latest tag
64 |           TAGS="${{ env.IMAGE_NAME }}:latest"
65 | 
66 |           # Add version tag if version changed
67 |           if [[ "${{ steps.check-version.outputs.VERSION_CHANGED }}" == "true" ]]; then
68 |             VERSION="${{ steps.check-version.outputs.VERSION }}"
69 |             TAGS="$TAGS,${{ env.IMAGE_NAME }}:$VERSION"
70 |             echo "Publishing with tags: latest, $VERSION"
71 |           else
72 |             echo "Publishing with tag: latest only"
73 |           fi
74 | 
75 |           echo "TAGS=$TAGS" >> $GITHUB_OUTPUT
76 | 
77 |       - name: Build and push Docker image
78 |         uses: docker/build-push-action@v5
79 |         with:
80 |           context: .
81 |           push: true
82 |           platforms: linux/amd64,linux/arm64
83 |           tags: ${{ steps.prep.outputs.TAGS }}
84 |           cache-from: type=gha
85 |           cache-to: type=gha,mode=max
86 | 
```

--------------------------------------------------------------------------------
/src/utils/sql-row-limiter.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Shared utility for applying row limits to SELECT queries using database-native LIMIT/TOP clauses
  3 |  */
  4 | export class SQLRowLimiter {
  5 |   /**
  6 |    * Check if a SQL statement is a SELECT query that can benefit from row limiting
  7 |    * Only handles SELECT queries
  8 |    */
  9 |   static isSelectQuery(sql: string): boolean {
 10 |     const trimmed = sql.trim().toLowerCase();
 11 |     return trimmed.startsWith('select');
 12 |   }
 13 | 
 14 |   /**
 15 |    * Check if a SQL statement already has a LIMIT clause
 16 |    */
 17 |   static hasLimitClause(sql: string): boolean {
 18 |     // Simple regex to detect LIMIT clause - handles most common cases
 19 |     const limitRegex = /\blimit\s+\d+/i;
 20 |     return limitRegex.test(sql);
 21 |   }
 22 | 
 23 |   /**
 24 |    * Check if a SQL statement already has a TOP clause (SQL Server)
 25 |    */
 26 |   static hasTopClause(sql: string): boolean {
 27 |     // Simple regex to detect TOP clause - handles most common cases
 28 |     const topRegex = /\bselect\s+top\s+\d+/i;
 29 |     return topRegex.test(sql);
 30 |   }
 31 | 
 32 |   /**
 33 |    * Extract existing LIMIT value from SQL if present
 34 |    */
 35 |   static extractLimitValue(sql: string): number | null {
 36 |     const limitMatch = sql.match(/\blimit\s+(\d+)/i);
 37 |     if (limitMatch) {
 38 |       return parseInt(limitMatch[1], 10);
 39 |     }
 40 |     return null;
 41 |   }
 42 | 
 43 |   /**
 44 |    * Extract existing TOP value from SQL if present (SQL Server)
 45 |    */
 46 |   static extractTopValue(sql: string): number | null {
 47 |     const topMatch = sql.match(/\bselect\s+top\s+(\d+)/i);
 48 |     if (topMatch) {
 49 |       return parseInt(topMatch[1], 10);
 50 |     }
 51 |     return null;
 52 |   }
 53 | 
 54 |   /**
 55 |    * Add or modify LIMIT clause in a SQL statement
 56 |    */
 57 |   static applyLimitToQuery(sql: string, maxRows: number): string {
 58 |     const existingLimit = this.extractLimitValue(sql);
 59 |     
 60 |     if (existingLimit !== null) {
 61 |       // Use the minimum of existing limit and maxRows
 62 |       const effectiveLimit = Math.min(existingLimit, maxRows);
 63 |       return sql.replace(/\blimit\s+\d+/i, `LIMIT ${effectiveLimit}`);
 64 |     } else {
 65 |       // Add LIMIT clause to the end of the query
 66 |       // Handle semicolon at the end
 67 |       const trimmed = sql.trim();
 68 |       const hasSemicolon = trimmed.endsWith(';');
 69 |       const sqlWithoutSemicolon = hasSemicolon ? trimmed.slice(0, -1) : trimmed;
 70 |       
 71 |       return `${sqlWithoutSemicolon} LIMIT ${maxRows}${hasSemicolon ? ';' : ''}`;
 72 |     }
 73 |   }
 74 | 
 75 |   /**
 76 |    * Add or modify TOP clause in a SQL statement (SQL Server)
 77 |    */
 78 |   static applyTopToQuery(sql: string, maxRows: number): string {
 79 |     const existingTop = this.extractTopValue(sql);
 80 |     
 81 |     if (existingTop !== null) {
 82 |       // Use the minimum of existing top and maxRows
 83 |       const effectiveTop = Math.min(existingTop, maxRows);
 84 |       return sql.replace(/\bselect\s+top\s+\d+/i, `SELECT TOP ${effectiveTop}`);
 85 |     } else {
 86 |       // Add TOP clause after SELECT
 87 |       return sql.replace(/\bselect\s+/i, `SELECT TOP ${maxRows} `);
 88 |     }
 89 |   }
 90 | 
 91 |   /**
 92 |    * Apply maxRows limit to a SELECT query only
 93 |    */
 94 |   static applyMaxRows(sql: string, maxRows: number | undefined): string {
 95 |     if (!maxRows || !this.isSelectQuery(sql)) {
 96 |       return sql;
 97 |     }
 98 |     return this.applyLimitToQuery(sql, maxRows);
 99 |   }
100 | 
101 |   /**
102 |    * Apply maxRows limit to a SELECT query using SQL Server TOP syntax
103 |    */
104 |   static applyMaxRowsForSQLServer(sql: string, maxRows: number | undefined): string {
105 |     if (!maxRows || !this.isSelectQuery(sql)) {
106 |       return sql;
107 |     }
108 |     return this.applyTopToQuery(sql, maxRows);
109 |   }
110 | }
```
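
Behavior at a glance; each result follows from the regexes above:

```typescript
import { SQLRowLimiter } from "./sql-row-limiter.js";

SQLRowLimiter.applyMaxRows("SELECT * FROM employee", 100);
// => "SELECT * FROM employee LIMIT 100"

SQLRowLimiter.applyMaxRows("SELECT * FROM employee LIMIT 500;", 100);
// => "SELECT * FROM employee LIMIT 100;" (existing limit is capped at maxRows)

SQLRowLimiter.applyMaxRowsForSQLServer("SELECT * FROM employee", 100);
// => "SELECT TOP 100 * FROM employee"

SQLRowLimiter.applyMaxRows("UPDATE employee SET gender = 'M'", 100);
// => unchanged: not a SELECT query
```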

--------------------------------------------------------------------------------
/src/utils/response-formatter.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Response formatter utility for consistent API responses
  3 |  * Provides formatting for resources, tools, and prompts
  4 |  */
  5 | 
  6 | /**
  7 |  * Custom JSON replacer function to handle BigInt serialization
  8 |  * Converts BigInt values to plain decimal strings (e.g. 123n -> "123")
  9 |  */
 10 | export function bigIntReplacer(_key: string, value: any): any {
 11 |   if (typeof value === 'bigint') {
 12 |     return value.toString();
 13 |   }
 14 |   return value;
 15 | }
 16 | 
 17 | /**
 18 |  * Create a success response with the given data
 19 |  */
 20 | export function formatSuccessResponse<T>(
 21 |   data: T,
 22 |   meta: Record<string, any> = {}
 23 | ): {
 24 |   success: true;
 25 |   data: T;
 26 |   meta?: Record<string, any>;
 27 | } {
 28 |   return {
 29 |     success: true,
 30 |     data,
 31 |     ...(Object.keys(meta).length > 0 ? { meta } : {}),
 32 |   };
 33 | }
 34 | 
 35 | /**
 36 |  * Create an error response with the given message and code
 37 |  */
 38 | export function formatErrorResponse(
 39 |   error: string,
 40 |   code: string = "ERROR",
 41 |   details?: any
 42 | ): {
 43 |   success: false;
 44 |   error: string;
 45 |   code: string;
 46 |   details?: any;
 47 | } {
 48 |   return {
 49 |     success: false,
 50 |     error,
 51 |     code,
 52 |     ...(details ? { details } : {}),
 53 |   };
 54 | }
 55 | 
 56 | /**
 57 |  * Create a tool error response object
 58 |  */
 59 | export function createToolErrorResponse(error: string, code: string = "ERROR", details?: any) {
 60 |   return {
 61 |     content: [
 62 |       {
 63 |         type: "text" as const,
 64 |         text: JSON.stringify(formatErrorResponse(error, code, details), bigIntReplacer, 2),
 65 |         mimeType: "application/json",
 66 |       },
 67 |     ],
 68 |     isError: true,
 69 |   };
 70 | }
 71 | 
 72 | /**
 73 |  * Create a tool success response object
 74 |  */
 75 | export function createToolSuccessResponse<T>(data: T, meta: Record<string, any> = {}) {
 76 |   return {
 77 |     content: [
 78 |       {
 79 |         type: "text" as const,
 80 |         text: JSON.stringify(formatSuccessResponse(data, meta), bigIntReplacer, 2),
 81 |         mimeType: "application/json",
 82 |       },
 83 |     ],
 84 |   };
 85 | }
 86 | 
 87 | /**
 88 |  * Create a resource error response object
 89 |  */
 90 | export function createResourceErrorResponse(
 91 |   uri: string,
 92 |   error: string,
 93 |   code: string = "ERROR",
 94 |   details?: any
 95 | ) {
 96 |   return {
 97 |     contents: [
 98 |       {
 99 |         uri,
100 |         text: JSON.stringify(formatErrorResponse(error, code, details), bigIntReplacer, 2),
101 |         mimeType: "application/json",
102 |       },
103 |     ],
104 |   };
105 | }
106 | 
107 | /**
108 |  * Create a resource success response object
109 |  */
110 | export function createResourceSuccessResponse<T>(
111 |   uri: string,
112 |   data: T,
113 |   meta: Record<string, any> = {}
114 | ) {
115 |   return {
116 |     contents: [
117 |       {
118 |         uri,
119 |         text: JSON.stringify(formatSuccessResponse(data, meta), bigIntReplacer, 2),
120 |         mimeType: "application/json",
121 |       },
122 |     ],
123 |   };
124 | }
125 | 
126 | /**
127 |  * Format a successful prompt response in the MCP format
128 |  */
129 | export function formatPromptSuccessResponse(
130 |   text: string,
131 |   references: string[] = []
132 | ): {
133 |   messages: Array<{
134 |     role: "assistant";
135 |     content: {
136 |       type: "text";
137 |       text: string;
138 |     };
139 |   }>;
140 |   references?: string[];
141 |   _meta?: Record<string, unknown>;
142 |   [key: string]: unknown;
143 | } {
144 |   return {
145 |     messages: [
146 |       {
147 |         role: "assistant",
148 |         content: {
149 |           type: "text",
150 |           text,
151 |         },
152 |       },
153 |     ],
154 |     ...(references.length > 0 ? { references } : {}),
155 |   };
156 | }
157 | 
158 | /**
159 |  * Format an error prompt response in the MCP format
160 |  */
161 | export function formatPromptErrorResponse(
162 |   error: string,
163 |   code: string = "ERROR"
164 | ): {
165 |   messages: Array<{
166 |     role: "assistant";
167 |     content: {
168 |       type: "text";
169 |       text: string;
170 |     };
171 |   }>;
172 |   error: string;
173 |   code: string;
174 |   _meta?: Record<string, unknown>;
175 |   [key: string]: unknown;
176 | } {
177 |   return {
178 |     messages: [
179 |       {
180 |         role: "assistant",
181 |         content: {
182 |           type: "text",
183 |           text: `Error: ${error}`,
184 |         },
185 |       },
186 |     ],
187 |     error,
188 |     code,
189 |   };
190 | }
191 | 
```
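
For example, a tool result containing a BigInt serializes as follows; note that `bigIntReplacer` turns `42n` into the plain string `"42"`:

```typescript
import { createToolSuccessResponse } from "./response-formatter.js";

const response = createToolSuccessResponse({ rows: [{ id: 42n }], count: 1 });
console.log(response.content[0].text);
// {
//   "success": true,
//   "data": {
//     "rows": [
//       {
//         "id": "42"
//       }
//     ],
//     "count": 1
//   }
// }
```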

--------------------------------------------------------------------------------
/src/tools/execute-sql.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from "zod";
  2 | import { ConnectorManager } from "../connectors/manager.js";
  3 | import { createToolSuccessResponse, createToolErrorResponse } from "../utils/response-formatter.js";
  4 | import { isReadOnlyMode } from "../config/env.js";
  5 | import { allowedKeywords } from "../utils/allowed-keywords.js";
  6 | import { ConnectorType } from "../connectors/interface.js";
  7 | 
  8 | // Schema for execute_sql tool
  9 | export const executeSqlSchema = {
 10 |   sql: z.string().describe("SQL query or multiple SQL statements to execute (separated by semicolons)"),
 11 | };
 12 | 
 13 | /**
 14 |  * Split a SQL string into individual statements on semicolons (naive split; semicolons inside string literals are not handled)
 15 |  * @param sql The SQL string to split
 16 |  * @returns Array of individual SQL statements
 17 |  */
 18 | function splitSQLStatements(sql: string): string[] {
 19 |   // Split by semicolon and filter out empty statements
 20 |   return sql.split(';')
 21 |     .map(statement => statement.trim())
 22 |     .filter(statement => statement.length > 0);
 23 | }
 24 | 
 25 | /**
 26 |  * Remove SQL comments from a query
 27 |  * @param sql The SQL query to clean
 28 |  * @returns The SQL query without comments
 29 |  */
 30 | function stripSQLComments(sql: string): string {
 31 |   // Remove single-line comments (-- comment)
 32 |   let cleaned = sql.split('\n').map(line => {
 33 |     const commentIndex = line.indexOf('--');
 34 |     return commentIndex >= 0 ? line.substring(0, commentIndex) : line;
 35 |   }).join('\n');
 36 |   
 37 |   // Remove multi-line comments (/* comment */)
 38 |   cleaned = cleaned.replace(/\/\*[\s\S]*?\*\//g, ' ');
 39 |   
 40 |   return cleaned.trim();
 41 | }
 42 | 
 43 | /**
 44 |  * Check if a SQL query is read-only based on its first keyword
 45 |  * @param sql The SQL query to check
 46 |  * @param connectorType The database type to check against
 47 |  * @returns True if the query is read-only (starts with allowed keywords)
 48 |  */
 49 | function isReadOnlySQL(sql: string, connectorType: ConnectorType): boolean {
 50 |   // Strip comments before analyzing
 51 |   const cleanedSQL = stripSQLComments(sql).toLowerCase();
 52 |   
 53 |   // If the statement is empty after removing comments, consider it read-only
 54 |   if (!cleanedSQL) {
 55 |     return true;
 56 |   }
 57 |   
 58 |   const firstWord = cleanedSQL.split(/\s+/)[0];
 59 |   
 60 |   // Get the appropriate allowed keywords list for this database type
 61 |   const keywordList = allowedKeywords[connectorType] || allowedKeywords.default || [];
 62 |   
 63 |   return keywordList.includes(firstWord);
 64 | }
 65 | 
 66 | /**
 67 |  * Check if all SQL statements in a multi-statement query are read-only
 68 |  * @param sql The SQL string (possibly containing multiple statements)
 69 |  * @param connectorType The database type to check against
 70 |  * @returns True if all statements are read-only
 71 |  */
 72 | function areAllStatementsReadOnly(sql: string, connectorType: ConnectorType): boolean {
 73 |   const statements = splitSQLStatements(sql);
 74 |   return statements.every(statement => isReadOnlySQL(statement, connectorType));
 75 | }
 76 | 
 77 | /**
 78 |  * execute_sql tool handler
 79 |  * Executes a SQL query and returns the results
 80 |  */
 81 | export async function executeSqlToolHandler({ sql }: { sql: string }, _extra: any) {
 82 |   const connector = ConnectorManager.getCurrentConnector();
 83 |   const executeOptions = ConnectorManager.getCurrentExecuteOptions();
 84 | 
 85 |   try {
 86 |     // Check if SQL is allowed based on readonly mode
 87 |     if (isReadOnlyMode() && !areAllStatementsReadOnly(sql, connector.id)) {
 88 |       return createToolErrorResponse(
 89 |         `Read-only mode is enabled. Only the following SQL operations are allowed: ${allowedKeywords[connector.id]?.join(", ") || "none"}`,
 90 |         "READONLY_VIOLATION"
 91 |       );
 92 |     }
 93 |     
 94 |     // Execute the SQL (single or multiple statements) if validation passed
 95 |     const result = await connector.executeSQL(sql, executeOptions);
 96 | 
 97 |     // Build response data
 98 |     const responseData = {
 99 |       rows: result.rows,
100 |       count: result.rows.length,
101 |     };
102 | 
103 |     return createToolSuccessResponse(responseData);
104 |   } catch (error) {
105 |     return createToolErrorResponse((error as Error).message, "EXECUTION_ERROR");
106 |   }
107 | }
108 | 
```
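
The helpers above are module-private, but the read-only gate is easy to restate. A minimal sketch, assuming an allow-list such as `["select", "show", "explain"]` (the real per-database lists live in `src/utils/allowed-keywords.ts`):

```typescript
// Sketch of the gating logic; comment stripping is omitted for brevity.
const allowed = ["select", "show", "explain"];

function firstKeyword(statement: string): string {
  return statement.trim().toLowerCase().split(/\s+/)[0] ?? "";
}

function isReadOnly(sql: string): boolean {
  return sql
    .split(";")
    .map((s) => s.trim())
    .filter((s) => s.length > 0)
    .every((s) => allowed.includes(firstKeyword(s)));
}

isReadOnly("SELECT 1; SHOW TABLES");          // true
isReadOnly("SELECT 1; DROP TABLE employee");  // false
```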

--------------------------------------------------------------------------------
/src/resources/procedures.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { ConnectorManager } from "../connectors/manager.js";
  2 | import {
  3 |   createResourceSuccessResponse,
  4 |   createResourceErrorResponse,
  5 | } from "../utils/response-formatter.js";
  6 | 
  7 | /**
  8 |  * Stored procedures/functions resource handler
  9 |  * Returns a list of all stored procedures/functions in the database or within a specific schema
 10 |  */
 11 | export async function proceduresResourceHandler(uri: URL, variables: any, _extra: any) {
 12 |   const connector = ConnectorManager.getCurrentConnector();
 13 | 
 14 |   // Extract the schema name from URL variables if present
 15 |   const schemaName =
 16 |     variables && variables.schemaName
 17 |       ? Array.isArray(variables.schemaName)
 18 |         ? variables.schemaName[0]
 19 |         : variables.schemaName
 20 |       : undefined;
 21 | 
 22 |   try {
 23 |     // If a schema name was provided, verify that it exists
 24 |     if (schemaName) {
 25 |       const availableSchemas = await connector.getSchemas();
 26 |       if (!availableSchemas.includes(schemaName)) {
 27 |         return createResourceErrorResponse(
 28 |           uri.href,
 29 |           `Schema '${schemaName}' does not exist or cannot be accessed`,
 30 |           "SCHEMA_NOT_FOUND"
 31 |         );
 32 |       }
 33 |     }
 34 | 
 35 |     // Get stored procedures with optional schema filter
 36 |     const procedureNames = await connector.getStoredProcedures(schemaName);
 37 | 
 38 |     // Prepare response data
 39 |     const responseData = {
 40 |       procedures: procedureNames,
 41 |       count: procedureNames.length,
 42 |       schema: schemaName,
 43 |     };
 44 | 
 45 |     // Use the utility to create a standardized response
 46 |     return createResourceSuccessResponse(uri.href, responseData);
 47 |   } catch (error) {
 48 |     return createResourceErrorResponse(
 49 |       uri.href,
 50 |       `Error retrieving stored procedures: ${(error as Error).message}`,
 51 |       "PROCEDURES_RETRIEVAL_ERROR"
 52 |     );
 53 |   }
 54 | }
 55 | 
 56 | /**
 57 |  * Stored procedure/function details resource handler
 58 |  * Returns details for a specific stored procedure/function
 59 |  */
 60 | export async function procedureDetailResourceHandler(uri: URL, variables: any, _extra: any) {
 61 |   const connector = ConnectorManager.getCurrentConnector();
 62 | 
 63 |   // Extract parameters from URL variables
 64 |   const schemaName =
 65 |     variables && variables.schemaName
 66 |       ? Array.isArray(variables.schemaName)
 67 |         ? variables.schemaName[0]
 68 |         : variables.schemaName
 69 |       : undefined;
 70 | 
 71 |   const procedureName =
 72 |     variables && variables.procedureName
 73 |       ? Array.isArray(variables.procedureName)
 74 |         ? variables.procedureName[0]
 75 |         : variables.procedureName
 76 |       : undefined;
 77 | 
 78 |   // Validate required parameters
 79 |   if (!procedureName) {
 80 |     return createResourceErrorResponse(uri.href, "Procedure name is required", "MISSING_PARAMETER");
 81 |   }
 82 | 
 83 |   try {
 84 |     // If a schema name was provided, verify that it exists
 85 |     if (schemaName) {
 86 |       const availableSchemas = await connector.getSchemas();
 87 |       if (!availableSchemas.includes(schemaName)) {
 88 |         return createResourceErrorResponse(
 89 |           uri.href,
 90 |           `Schema '${schemaName}' does not exist or cannot be accessed`,
 91 |           "SCHEMA_NOT_FOUND"
 92 |         );
 93 |       }
 94 |     }
 95 | 
 96 |     // Get procedure details
 97 |     const procedureDetails = await connector.getStoredProcedureDetail(procedureName, schemaName);
 98 | 
 99 |     // Prepare response data
100 |     const responseData = {
101 |       procedureName: procedureDetails.procedure_name,
102 |       procedureType: procedureDetails.procedure_type,
103 |       language: procedureDetails.language,
104 |       parameters: procedureDetails.parameter_list,
105 |       returnType: procedureDetails.return_type,
106 |       definition: procedureDetails.definition,
107 |       schema: schemaName,
108 |     };
109 | 
110 |     // Use the utility to create a standardized response
111 |     return createResourceSuccessResponse(uri.href, responseData);
112 |   } catch (error) {
113 |     return createResourceErrorResponse(
114 |       uri.href,
115 |       `Error retrieving procedure details: ${(error as Error).message}`,
116 |       "PROCEDURE_DETAILS_ERROR"
117 |     );
118 |   }
119 | }
120 | 
```

--------------------------------------------------------------------------------
/src/utils/__tests__/safe-url.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect } from 'vitest';
  2 | import { SafeURL } from '../safe-url.js';
  3 | 
  4 | describe('SafeURL', () => {
  5 |   it('should parse a simple DSN correctly', () => {
  6 |     const url = new SafeURL('postgres://localhost:5432/dbname');
  7 |     
  8 |     expect(url.protocol).toBe('postgres:');
  9 |     expect(url.hostname).toBe('localhost');
 10 |     expect(url.port).toBe('5432');
 11 |     expect(url.pathname).toBe('/dbname');
 12 |     expect(url.username).toBe('');
 13 |     expect(url.password).toBe('');
 14 |     expect(url.searchParams.size).toBe(0);
 15 |   });
 16 | 
 17 |   it('should parse a DSN with authentication correctly', () => {
 18 |     const url = new SafeURL('postgres://user:password@localhost:5432/dbname');
 19 |     
 20 |     expect(url.protocol).toBe('postgres:');
 21 |     expect(url.hostname).toBe('localhost');
 22 |     expect(url.port).toBe('5432');
 23 |     expect(url.pathname).toBe('/dbname');
 24 |     expect(url.username).toBe('user');
 25 |     expect(url.password).toBe('password');
 26 |     expect(url.searchParams.size).toBe(0);
 27 |   });
 28 | 
 29 |   it('should handle special characters in password correctly', () => {
 30 |     const url = new SafeURL('postgres://user:pass%23word@localhost:5432/dbname');
 31 |     
 32 |     expect(url.protocol).toBe('postgres:');
 33 |     expect(url.hostname).toBe('localhost');
 34 |     expect(url.port).toBe('5432');
 35 |     expect(url.pathname).toBe('/dbname');
 36 |     expect(url.username).toBe('user');
 37 |     expect(url.password).toBe('pass#word');
 38 |     expect(url.searchParams.size).toBe(0);
 39 |   });
 40 | 
 41 |   it('should handle unencoded special characters in password correctly', () => {
 42 |     const url = new SafeURL('postgres://user:pass#word@localhost:5432/dbname');
 43 |     
 44 |     expect(url.protocol).toBe('postgres:');
 45 |     expect(url.hostname).toBe('localhost');
 46 |     expect(url.port).toBe('5432');
 47 |     expect(url.pathname).toBe('/dbname');
 48 |     expect(url.username).toBe('user');
 49 |     expect(url.password).toBe('pass#word');
 50 |     expect(url.searchParams.size).toBe(0);
 51 |   });
 52 | 
 53 |   it('should parse query parameters correctly', () => {
 54 |     const url = new SafeURL('postgres://localhost:5432/dbname?sslmode=require&timeout=30');
 55 |     
 56 |     expect(url.protocol).toBe('postgres:');
 57 |     expect(url.hostname).toBe('localhost');
 58 |     expect(url.port).toBe('5432');
 59 |     expect(url.pathname).toBe('/dbname');
 60 |     expect(url.searchParams.size).toBe(2);
 61 |     expect(url.getSearchParam('sslmode')).toBe('require');
 62 |     expect(url.getSearchParam('timeout')).toBe('30');
 63 |   });
 64 | 
 65 |   it('should handle special characters in query parameters', () => {
 66 |     const url = new SafeURL('postgres://localhost:5432/dbname?param=value%20with%20spaces');
 67 |     
 68 |     expect(url.getSearchParam('param')).toBe('value with spaces');
 69 |   });
 70 | 
 71 |   it('should handle a DSN without a pathname', () => {
 72 |     const url = new SafeURL('postgres://localhost:5432');
 73 |     
 74 |     expect(url.protocol).toBe('postgres:');
 75 |     expect(url.hostname).toBe('localhost');
 76 |     expect(url.port).toBe('5432');
 77 |     expect(url.pathname).toBe('');
 78 |   });
 79 | 
 80 |   it('should handle both username and password with special characters', () => {
 81 |     const url = new SafeURL('postgres://user%40domain:pass%26word@localhost:5432/dbname');
 82 |     
 83 |     expect(url.username).toBe('user@domain');
 84 |     expect(url.password).toBe('pass&word');
 85 |   });
 86 | 
 87 |   it('should support the forEachSearchParam method', () => {
 88 |     const url = new SafeURL('postgres://localhost:5432/dbname?param1=value1&param2=value2');
 89 |     const params: Record<string, string> = {};
 90 |     
 91 |     url.forEachSearchParam((value, key) => {
 92 |       params[key] = value;
 93 |     });
 94 |     
 95 |     expect(Object.keys(params).length).toBe(2);
 96 |     expect(params['param1']).toBe('value1');
 97 |     expect(params['param2']).toBe('value2');
 98 |   });
 99 | 
100 |   it('should throw an error for empty URLs', () => {
101 |     expect(() => new SafeURL('')).toThrow('URL string cannot be empty');
102 |   });
103 |   
104 |   it('should throw an error for URLs without a protocol', () => {
105 |     expect(() => new SafeURL('localhost:5432/dbname')).toThrow('Invalid URL format: missing protocol');
106 |   });
107 | });
```

--------------------------------------------------------------------------------
/resources/employee-sqlite/employee.sql:
--------------------------------------------------------------------------------

```sql
  1 | --  Sample employee database 
  2 | --  See changelog table for details
  3 | --  Copyright (C) 2007,2008, MySQL AB
  4 | --  
  5 | --  Original data created by Fusheng Wang and Carlo Zaniolo
  6 | --  http://www.cs.aau.dk/TimeCenter/software.htm
  7 | --  http://www.cs.aau.dk/TimeCenter/Data/employeeTemporalDataSet.zip
  8 | -- 
  9 | --  Current schema by Giuseppe Maxia 
 10 | --  Data conversion from XML to relational by Patrick Crews
 11 | --  SQLite adaptation by Claude Code
 12 | -- 
 13 | -- This work is licensed under the 
 14 | -- Creative Commons Attribution-Share Alike 3.0 Unported License. 
 15 | -- To view a copy of this license, visit 
 16 | -- http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to 
 17 | -- Creative Commons, 171 Second Street, Suite 300, San Francisco, 
 18 | -- California, 94105, USA.
 19 | -- 
 20 | --  DISCLAIMER
 21 | --  To the best of our knowledge, this data is fabricated, and
 22 | --  it does not correspond to real people. 
 23 | --  Any similarity to existing people is purely coincidental.
 24 | -- 
 25 | 
 26 | PRAGMA foreign_keys = ON;
 27 | 
 28 | SELECT 'CREATING DATABASE STRUCTURE' as 'INFO';
 29 | 
 30 | DROP TABLE IF EXISTS dept_emp;
 31 | DROP TABLE IF EXISTS dept_manager;
 32 | DROP TABLE IF EXISTS title;
 33 | DROP TABLE IF EXISTS salary;
 34 | DROP TABLE IF EXISTS employee;
 35 | DROP TABLE IF EXISTS department;
 36 | DROP VIEW IF EXISTS dept_emp_latest_date;
 37 | DROP VIEW IF EXISTS current_dept_emp;
 38 | 
 39 | CREATE TABLE employee (
 40 |     emp_no      INTEGER         NOT NULL,
 41 |     birth_date  DATE            NOT NULL,
 42 |     first_name  TEXT            NOT NULL,
 43 |     last_name   TEXT            NOT NULL,
 44 |     gender      TEXT            NOT NULL CHECK (gender IN ('M','F')),
 45 |     hire_date   DATE            NOT NULL,
 46 |     PRIMARY KEY (emp_no)
 47 | );
 48 | 
 49 | CREATE TABLE department (
 50 |     dept_no     TEXT            NOT NULL,
 51 |     dept_name   TEXT            NOT NULL,
 52 |     PRIMARY KEY (dept_no),
 53 |     UNIQUE      (dept_name)
 54 | );
 55 | 
 56 | CREATE TABLE dept_manager (
 57 |    emp_no       INTEGER         NOT NULL,
 58 |    dept_no      TEXT            NOT NULL,
 59 |    from_date    DATE            NOT NULL,
 60 |    to_date      DATE            NOT NULL,
 61 |    FOREIGN KEY (emp_no)  REFERENCES employee (emp_no)    ON DELETE CASCADE,
 62 |    FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE,
 63 |    PRIMARY KEY (emp_no,dept_no)
 64 | ); 
 65 | 
 66 | CREATE TABLE dept_emp (
 67 |     emp_no      INTEGER         NOT NULL,
 68 |     dept_no     TEXT            NOT NULL,
 69 |     from_date   DATE            NOT NULL,
 70 |     to_date     DATE            NOT NULL,
 71 |     FOREIGN KEY (emp_no)  REFERENCES employee (emp_no)   ON DELETE CASCADE,
 72 |     FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE,
 73 |     PRIMARY KEY (emp_no,dept_no)
 74 | );
 75 | 
 76 | CREATE TABLE title (
 77 |     emp_no      INTEGER         NOT NULL,
 78 |     title       TEXT            NOT NULL,
 79 |     from_date   DATE            NOT NULL,
 80 |     to_date     DATE,
 81 |     FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE,
 82 |     PRIMARY KEY (emp_no,title,from_date)
 83 | );
 84 | 
 85 | CREATE TABLE salary (
 86 |     emp_no      INTEGER         NOT NULL,
 87 |     amount      INTEGER         NOT NULL,
 88 |     from_date   DATE            NOT NULL,
 89 |     to_date     DATE            NOT NULL,
 90 |     FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE,
 91 |     PRIMARY KEY (emp_no,from_date)
 92 | );
 93 | 
 94 | CREATE VIEW dept_emp_latest_date AS
 95 |     SELECT emp_no, MAX(from_date) AS from_date, MAX(to_date) AS to_date
 96 |     FROM dept_emp
 97 |     GROUP BY emp_no;
 98 | 
 99 | -- shows only the current department for each employee
100 | CREATE VIEW current_dept_emp AS
101 |     SELECT l.emp_no, dept_no, l.from_date, l.to_date
102 |     FROM dept_emp d
103 |         INNER JOIN dept_emp_latest_date l
104 |         ON d.emp_no=l.emp_no AND d.from_date=l.from_date AND l.to_date = d.to_date;
105 | 
106 | SELECT 'LOADING department' as 'INFO';
107 | .read load_department.sql
108 | SELECT 'LOADING employee' as 'INFO';
109 | .read load_employee.sql
110 | SELECT 'LOADING dept_emp' as 'INFO';
111 | .read load_dept_emp.sql
112 | SELECT 'LOADING dept_manager' as 'INFO';
113 | .read load_dept_manager.sql
114 | SELECT 'LOADING title' as 'INFO';
115 | .read load_title.sql
116 | SELECT 'LOADING salary' as 'INFO';
117 | .read load_salary1.sql
```
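
The `.read` directives at the end are sqlite3 shell dot-commands, so this script must be run through the `sqlite3` CLI rather than fed to a database driver. A minimal sketch (assuming `sqlite3` is on the PATH; the relative `.read` paths require the working directory to be `resources/employee-sqlite`):

```typescript
import { execFileSync } from "node:child_process";

// Build the demo database with the sqlite3 shell. A plain SQL driver cannot
// execute the `.read` dot-commands embedded in employee.sql.
execFileSync("sqlite3", ["employee.db", ".read employee.sql"], {
  cwd: "resources/employee-sqlite",
  stdio: "inherit",
});
```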

--------------------------------------------------------------------------------
/resources/employee-sqlite/test_employee_md5.sql:
--------------------------------------------------------------------------------

```sql
  1 | --  Sample employee database 
  2 | --  See changelog table for details
  3 | --  Copyright (C) 2007,2008, MySQL AB
  4 | --  
  5 | --  Original data created by Fusheng Wang and Carlo Zaniolo
  6 | --  http://www.cs.aau.dk/TimeCenter/software.htm
  7 | --  http://www.cs.aau.dk/TimeCenter/Data/employeeTemporalDataSet.zip
  8 | -- 
  9 | --  Current schema by Giuseppe Maxia 
 10 | --  Data conversion from XML to relational by Patrick Crews
 11 | --  SQLite adaptation by Claude Code
 12 | -- 
 13 | -- This work is licensed under the 
 14 | -- Creative Commons Attribution-Share Alike 3.0 Unported License. 
 15 | -- To view a copy of this license, visit 
 16 | -- http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to 
 17 | -- Creative Commons, 171 Second Street, Suite 300, San Francisco, 
 18 | -- California, 94105, USA.
 19 | -- 
 20 | --  DISCLAIMER
 21 | --  To the best of our knowledge, this data is fabricated, and
 22 | --  it does not correspond to real people. 
 23 | --  Any similarity to existing people is purely coincidental.
 24 | -- 
 25 | 
 26 | SELECT 'TESTING INSTALLATION' as 'INFO';
 27 | 
 28 | DROP TABLE IF EXISTS expected_value;
 29 | DROP TABLE IF EXISTS found_value;
 30 | 
 31 | CREATE TABLE expected_value (
 32 |     table_name TEXT NOT NULL PRIMARY KEY,
 33 |     recs INTEGER NOT NULL,
 34 |     crc_md5 TEXT NOT NULL
 35 | );
 36 | 
 37 | CREATE TABLE found_value (
 38 |     table_name TEXT NOT NULL PRIMARY KEY,
 39 |     recs INTEGER NOT NULL,
 40 |     crc_md5 TEXT NOT NULL
 41 | );
 42 | 
 43 | INSERT INTO expected_value VALUES 
 44 | ('employee',    1000, '595460127fb609c2b110b1796083e242'),
 45 | ('department',     9, 'd1af5e170d2d1591d776d5638d71fc5f'),
 46 | ('dept_manager',  16, '8ff425d5ad6dc56975998d1893b8dca9'),
 47 | ('dept_emp',    1103, 'e302aa5b56a69b49e40eb0d60674addc'),
 48 | ('title',       1470, 'ba77dd331ce00f76c1643a7d73cdcee6'),
 49 | ('salary',      9488, '61f22cfece4d34f5bb94c9f05a3da3ef');
 50 | 
 51 | SELECT table_name, recs AS expected_record, crc_md5 AS expected_crc FROM expected_value;
 52 | 
 53 | DROP TABLE IF EXISTS tchecksum;
 54 | CREATE TABLE tchecksum (chk TEXT);
 55 | 
 56 | -- For SQLite, we need to use a different approach for MD5 calculation
 57 | -- Insert employee checksums
 58 | INSERT INTO found_value
 59 | SELECT 'employee', COUNT(*), 
 60 |        (SELECT hex(md5(group_concat(emp_no||birth_date||first_name||last_name||gender||hire_date, '#')))
 61 |         FROM (SELECT * FROM employee ORDER BY emp_no))
 62 | FROM employee;
 63 | 
 64 | -- Insert department checksums
 65 | INSERT INTO found_value
 66 | SELECT 'department', COUNT(*),
 67 |        (SELECT hex(md5(group_concat(dept_no||dept_name, '#')))
 68 |         FROM (SELECT * FROM department ORDER BY dept_no))
 69 | FROM department;
 70 | 
 71 | -- Insert dept_manager checksums
 72 | INSERT INTO found_value
 73 | SELECT 'dept_manager', COUNT(*),
 74 |        (SELECT hex(md5(group_concat(dept_no||emp_no||from_date||to_date, '#')))
 75 |         FROM (SELECT * FROM dept_manager ORDER BY dept_no, emp_no))
 76 | FROM dept_manager;
 77 | 
 78 | -- Insert dept_emp checksums
 79 | INSERT INTO found_value
 80 | SELECT 'dept_emp', COUNT(*),
 81 |        (SELECT hex(md5(group_concat(dept_no||emp_no||from_date||to_date, '#')))
 82 |         FROM (SELECT * FROM dept_emp ORDER BY dept_no, emp_no))
 83 | FROM dept_emp;
 84 | 
 85 | -- Insert title checksums
 86 | INSERT INTO found_value
 87 | SELECT 'title', COUNT(*),
 88 |        (SELECT hex(md5(group_concat(emp_no||title||from_date||IFNULL(to_date,''), '#')))
 89 |         FROM (SELECT * FROM title ORDER BY emp_no, title, from_date))
 90 | FROM title;
 91 | 
 92 | -- Insert salary checksums
 93 | INSERT INTO found_value
 94 | SELECT 'salary', COUNT(*),
 95 |        (SELECT hex(md5(group_concat(emp_no||amount||from_date||to_date, '#')))
 96 |         FROM (SELECT * FROM salary ORDER BY emp_no, from_date, to_date))
 97 | FROM salary;
 98 | 
 99 | SELECT table_name, recs as 'found_records', crc_md5 as found_crc FROM found_value;
100 | 
101 | -- Compare expected vs found
102 | SELECT  
103 |     e.table_name, 
104 |     CASE WHEN e.recs=f.recs THEN 'OK' ELSE 'not ok' END AS records_match, 
105 |     CASE WHEN e.crc_md5=f.crc_md5 THEN 'ok' ELSE 'not ok' END AS crc_match 
106 | FROM 
107 |     expected_value e 
108 |     JOIN found_value f USING (table_name);
109 | 
110 | -- Check for failures
111 | SELECT 
112 |     'CRC' as summary, 
113 |     CASE WHEN (SELECT COUNT(*) FROM expected_value e JOIN found_value f USING(table_name) WHERE f.crc_md5 != e.crc_md5) = 0 
114 |          THEN 'OK' ELSE 'FAIL' END as 'result'
115 | UNION ALL
116 | SELECT 
117 |     'count', 
118 |     CASE WHEN (SELECT COUNT(*) FROM expected_value e JOIN found_value f USING(table_name) WHERE f.recs != e.recs) = 0 
119 |          THEN 'OK' ELSE 'FAIL' END;
```
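
Note that SQLite has no built-in `md5()` function, so this script assumes one is supplied by the host environment, e.g. a loadable extension or a user-defined function registered by the driver. A minimal sketch of registering one with `better-sqlite3` (an assumed driver choice, for illustration only) and Node's `crypto` module:

```typescript
import Database from "better-sqlite3";
import { createHash } from "node:crypto";

const db = new Database("employee.db");

// Register a scalar md5(text) function. better-sqlite3 exposes user-defined
// functions through db.function(). This returns the lowercase hex digest;
// return the raw Buffer instead if the caller wraps the result in hex().
db.function("md5", (value: unknown) =>
  value == null ? null : createHash("md5").update(String(value)).digest("hex")
);

console.log(db.prepare("SELECT md5('hello') AS h").get());
```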

--------------------------------------------------------------------------------
/src/utils/ssh-config-parser.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { readFileSync, existsSync } from 'fs';
  2 | import { homedir } from 'os';
  3 | import { join } from 'path';
  4 | import SSHConfig from 'ssh-config';
  5 | import type { SSHTunnelConfig } from '../types/ssh.js';
  6 | 
  7 | /**
  8 |  * Default SSH key paths to check if no IdentityFile is specified
  9 |  */
 10 | const DEFAULT_SSH_KEYS = [
 11 |   '~/.ssh/id_rsa',
 12 |   '~/.ssh/id_ed25519',
 13 |   '~/.ssh/id_ecdsa',
 14 |   '~/.ssh/id_dsa'
 15 | ];
 16 | 
 17 | /**
 18 |  * Expand tilde (~) in file paths to home directory
 19 |  */
 20 | function expandTilde(filePath: string): string {
 21 |   if (filePath.startsWith('~/')) {
 22 |     return join(homedir(), filePath.substring(2));
 23 |   }
 24 |   return filePath;
 25 | }
 26 | 
 27 | /**
 28 |  * Check if a file exists
 29 |  */
 30 | function fileExists(filePath: string): boolean {
 31 |   try {
 32 |     return existsSync(expandTilde(filePath));
 33 |   } catch {
 34 |     return false;
 35 |   }
 36 | }
 37 | 
 38 | /**
 39 |  * Find the first existing SSH key from default locations
 40 |  */
 41 | function findDefaultSSHKey(): string | undefined {
 42 |   for (const keyPath of DEFAULT_SSH_KEYS) {
 43 |     if (fileExists(keyPath)) {
 44 |       return expandTilde(keyPath);
 45 |     }
 46 |   }
 47 |   return undefined;
 48 | }
 49 | 
 50 | /**
 51 |  * Parse SSH config file and extract configuration for a specific host
 52 |  * @param hostAlias The host alias to look up in the SSH config
 53 |  * @param configPath Path to SSH config file
 54 |  * @returns SSH tunnel configuration or null if not found
 55 |  */
 56 | export function parseSSHConfig(
 57 |   hostAlias: string,
 58 |   configPath: string
 59 | ): SSHTunnelConfig | null {
 60 |   const sshConfigPath = configPath;
 61 | 
 62 |   // Check if SSH config file exists
 63 |   if (!existsSync(sshConfigPath)) {
 64 |     return null;
 65 |   }
 66 | 
 67 |   try {
 68 |     // Read and parse SSH config file
 69 |     const configContent = readFileSync(sshConfigPath, 'utf8');
 70 |     const config = SSHConfig.parse(configContent);
 71 | 
 72 |     // Find configuration for the specified host
 73 |     const hostConfig = config.compute(hostAlias);
 74 |     
 75 |     // Check if we have a valid config (not just Include directives)
 76 |     if (!hostConfig || (!hostConfig.HostName && !hostConfig.User)) {
 77 |       return null;
 78 |     }
 79 | 
 80 |     // Extract SSH configuration parameters
 81 |     const sshConfig: Partial<SSHTunnelConfig> = {};
 82 | 
 83 |     // Host (required)
 84 |     if (hostConfig.HostName) {
 85 |       sshConfig.host = hostConfig.HostName;
 86 |     } else {
 87 |       // If no HostName specified, use the host alias itself
 88 |       sshConfig.host = hostAlias;
 89 |     }
 90 | 
 91 |     // Port (optional, default will be 22)
 92 |     if (hostConfig.Port) {
 93 |       sshConfig.port = parseInt(hostConfig.Port, 10);
 94 |     }
 95 | 
 96 |     // User (required)
 97 |     if (hostConfig.User) {
 98 |       sshConfig.username = hostConfig.User;
 99 |     }
100 | 
101 |     // IdentityFile (private key)
102 |     if (hostConfig.IdentityFile) {
103 |       // SSH config can have multiple IdentityFile entries, take the first one
104 |       const identityFile = Array.isArray(hostConfig.IdentityFile) 
105 |         ? hostConfig.IdentityFile[0] 
106 |         : hostConfig.IdentityFile;
107 |       
108 |       const expandedPath = expandTilde(identityFile);
109 |       if (fileExists(expandedPath)) {
110 |         sshConfig.privateKey = expandedPath;
111 |       }
112 |     }
113 | 
114 |     // If no IdentityFile specified or found, try default SSH keys
115 |     if (!sshConfig.privateKey) {
116 |       const defaultKey = findDefaultSSHKey();
117 |       if (defaultKey) {
118 |         sshConfig.privateKey = defaultKey;
119 |       }
120 |     }
121 | 
122 |     // ProxyJump support could be added in the future if needed
123 |     // Currently, we'll log a warning if ProxyJump is detected
124 |     if (hostConfig.ProxyJump || hostConfig.ProxyCommand) {
125 |       console.error('Warning: ProxyJump/ProxyCommand in SSH config is not yet supported by DBHub');
126 |     }
127 | 
128 |     // Validate that we have minimum required fields
129 |     if (!sshConfig.host || !sshConfig.username) {
130 |       return null;
131 |     }
132 | 
133 |     return sshConfig as SSHTunnelConfig;
134 |   } catch (error) {
135 |     console.error(`Error parsing SSH config: ${error instanceof Error ? error.message : String(error)}`);
136 |     return null;
137 |   }
138 | }
139 | 
140 | /**
141 |  * Check if a string looks like an SSH host alias (not an IP or domain)
142 |  * This is a heuristic to determine if we should look up the host in SSH config
143 |  */
144 | export function looksLikeSSHAlias(host: string): boolean {
145 |   // If it contains dots, it's likely a domain or IP
146 |   if (host.includes('.')) {
147 |     return false;
148 |   }
149 |   
150 |   // If it's all numbers (with possible colons for IPv6), it's likely an IP
151 |   if (/^[\d:]+$/.test(host)) {
152 |     return false;
153 |   }
154 |   
155 |   // Check for IPv6 addresses with hex characters
156 |   if (/^[0-9a-fA-F:]+$/.test(host) && host.includes(':')) {
157 |     return false;
158 |   }
159 |   
160 |   // Otherwise, treat it as a potential SSH alias
161 |   return true;
162 | }
```
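
A typical call site gates the config lookup on `looksLikeSSHAlias` and otherwise treats the value as a literal host. A sketch (the alias and config path are illustrative):

```typescript
import { homedir } from "node:os";
import { join } from "node:path";
import { looksLikeSSHAlias, parseSSHConfig } from "./ssh-config-parser.js";

const host = "mybastion"; // e.g. the value passed via --ssh-host

if (looksLikeSSHAlias(host)) {
  // Look the alias up in ~/.ssh/config; null means no usable entry was found.
  const config = parseSSHConfig(host, join(homedir(), ".ssh", "config"));
  if (config) {
    console.log(`Resolved ${host} -> ${config.username}@${config.host}:${config.port ?? 22}`);
  }
} else {
  console.log(`${host} looks like a literal hostname/IP; skipping SSH config lookup`);
}
```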

--------------------------------------------------------------------------------
/src/config/__tests__/ssh-config-integration.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
  2 | import { resolveSSHConfig } from '../env.js';
  3 | import { homedir } from 'os';
  4 | import { join } from 'path';
  5 | import * as sshConfigParser from '../../utils/ssh-config-parser.js';
  6 | 
  7 | // Mock the ssh-config-parser module
  8 | vi.mock('../../utils/ssh-config-parser.js', () => ({
  9 |   parseSSHConfig: vi.fn(),
 10 |   looksLikeSSHAlias: vi.fn()
 11 | }));
 12 | 
 13 | describe('SSH Config Integration', () => {
 14 |   let originalArgs: string[];
 15 |   
 16 |   beforeEach(() => {
 17 |     // Save original values
 18 |     originalArgs = process.argv;
 19 |     
 20 |     // Clear mocks
 21 |     vi.clearAllMocks();
 22 |   });
 23 |   
 24 |   afterEach(() => {
 25 |     // Restore original values
 26 |     process.argv = originalArgs;
 27 |     
 28 |     // Clear any environment variables
 29 |     delete process.env.SSH_HOST;
 30 |     delete process.env.SSH_USER;
 31 |     delete process.env.SSH_PORT;
 32 |     delete process.env.SSH_KEY;
 33 |     delete process.env.SSH_PASSWORD;
 34 |   });
 35 |   
 36 |   it('should resolve SSH config from host alias', () => {
 37 |     // Mock the SSH config parser
 38 |     vi.mocked(sshConfigParser.looksLikeSSHAlias).mockReturnValue(true);
 39 |     vi.mocked(sshConfigParser.parseSSHConfig).mockImplementation((hostAlias: string, configPath: string) => ({
 40 |       host: 'bastion.example.com',
 41 |       username: 'ubuntu',
 42 |       port: 2222,
 43 |       privateKey: '/home/user/.ssh/id_rsa'
 44 |     }));
 45 |     
 46 |     // Simulate command line args
 47 |     process.argv = ['node', 'index.js', '--ssh-host=mybastion'];
 48 |     
 49 |     const result = resolveSSHConfig();
 50 |     
 51 |     expect(result).not.toBeNull();
 52 |     expect(result?.config).toMatchObject({
 53 |       host: 'bastion.example.com',
 54 |       username: 'ubuntu',
 55 |       port: 2222,
 56 |       privateKey: '/home/user/.ssh/id_rsa'
 57 |     });
 58 |     expect(result?.source).toContain('SSH config for host \'mybastion\'');
 59 |   });
 60 |   
 61 |   it('should allow command line to override SSH config values', () => {
 62 |     // Mock the SSH config parser
 63 |     vi.mocked(sshConfigParser.looksLikeSSHAlias).mockReturnValue(true);
 64 |     vi.mocked(sshConfigParser.parseSSHConfig).mockImplementation((hostAlias: string, configPath: string) => ({
 65 |       host: 'bastion.example.com',
 66 |       username: 'ubuntu',
 67 |       port: 2222,
 68 |       privateKey: '/home/user/.ssh/id_rsa'
 69 |     }));
 70 |     
 71 |     // Simulate command line args with override
 72 |     process.argv = ['node', 'index.js', '--ssh-host=mybastion', '--ssh-user=override-user'];
 73 |     
 74 |     const result = resolveSSHConfig();
 75 |     
 76 |     expect(result).not.toBeNull();
 77 |     expect(result?.config).toMatchObject({
 78 |       host: 'bastion.example.com',
 79 |       username: 'override-user', // Command line overrides config
 80 |       port: 2222,
 81 |       privateKey: '/home/user/.ssh/id_rsa'
 82 |     });
 83 |   });
 84 |   
 85 |   it('should work with environment variables', () => {
 86 |     // Mock the SSH config parser
 87 |     vi.mocked(sshConfigParser.looksLikeSSHAlias).mockReturnValue(true);
 88 |     vi.mocked(sshConfigParser.parseSSHConfig).mockImplementation((hostAlias: string, configPath: string) => ({
 89 |       host: 'bastion.example.com',
 90 |       username: 'ubuntu',
 91 |       port: 2222,
 92 |       privateKey: '/home/user/.ssh/id_rsa'
 93 |     }));
 94 |     
 95 |     process.env.SSH_HOST = 'mybastion';
 96 |     
 97 |     const result = resolveSSHConfig();
 98 |     
 99 |     expect(result).not.toBeNull();
100 |     expect(result?.config).toMatchObject({
101 |       host: 'bastion.example.com',
102 |       username: 'ubuntu',
103 |       port: 2222,
104 |       privateKey: '/home/user/.ssh/id_rsa'
105 |     });
106 |   });
107 |   
108 |   it('should not use SSH config for direct hostnames', () => {
109 |     // Mock the SSH config parser
110 |     vi.mocked(sshConfigParser.looksLikeSSHAlias).mockReturnValue(false);
111 |     
112 |     process.argv = ['node', 'index.js', '--ssh-host=direct.example.com', '--ssh-user=myuser', '--ssh-password=mypass'];
113 |     
114 |     const result = resolveSSHConfig();
115 |     
116 |     expect(result).not.toBeNull();
117 |     expect(result?.config).toMatchObject({
118 |       host: 'direct.example.com',
119 |       username: 'myuser',
120 |       password: 'mypass'
121 |     });
122 |     expect(result?.source).not.toContain('SSH config');
123 |     expect(sshConfigParser.parseSSHConfig).not.toHaveBeenCalled();
124 |   });
125 |   
126 |   it('should require SSH user when only host is provided', () => {
127 |     // Mock the SSH config parser to return null (no config found)
128 |     vi.mocked(sshConfigParser.looksLikeSSHAlias).mockReturnValue(true);
129 |     vi.mocked(sshConfigParser.parseSSHConfig).mockImplementation((hostAlias: string, configPath: string) => null);
130 |     
131 |     process.argv = ['node', 'index.js', '--ssh-host=unknown-host'];
132 |     
133 |     expect(() => resolveSSHConfig()).toThrow('SSH tunnel configuration requires at least --ssh-host and --ssh-user');
134 |   });
135 | });
```

--------------------------------------------------------------------------------
/src/utils/safe-url.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * SafeURL utility
  3 |  * 
  4 |  * Provides a safer alternative to URL constructor for database connections
  5 |  * that may contain special characters in passwords or other parts
  6 |  */
  7 | 
  8 | /**
  9 |  * Interface defining the structure of a URL parser
 10 |  * that can handle special characters in connection strings
 11 |  */
 12 | export interface ISafeURL {
 13 |   protocol: string;
 14 |   hostname: string;
 15 |   port: string;
 16 |   pathname: string;
 17 |   username: string;
 18 |   password: string;
 19 |   searchParams: Map<string, string>;
 20 |   
 21 |   getSearchParam(name: string): string | null;
 22 |   forEachSearchParam(callback: (value: string, key: string) => void): void;
 23 | }
 24 | 
 25 | /**
 26 |  * SafeURL class implements a parser for handling DSN strings
 27 |  * with special characters that might break the standard URL constructor
 28 |  */
 29 | export class SafeURL implements ISafeURL {
 30 |   protocol: string;
 31 |   hostname: string;
 32 |   port: string;
 33 |   pathname: string;
 34 |   username: string;
 35 |   password: string;
 36 |   searchParams: Map<string, string>;
 37 | 
 38 |   /**
 39 |    * Parse a URL and handle special characters in passwords
 40 |    * This is a safe alternative to the URL constructor
 41 |    * 
 42 |    * @param urlString - The DSN string to parse
 43 |    */
 44 |   constructor(urlString: string) {
 45 |     // Initialize with defaults
 46 |     this.protocol = '';
 47 |     this.hostname = '';
 48 |     this.port = '';
 49 |     this.pathname = '';
 50 |     this.username = '';
 51 |     this.password = '';
 52 |     this.searchParams = new Map<string, string>();
 53 | 
 54 |     // Validate URL string
 55 |     if (!urlString || urlString.trim() === '') {
 56 |       throw new Error('URL string cannot be empty');
 57 |     }
 58 | 
 59 |     try {
 60 |       // Extract protocol
 61 |       const protocolSeparator: number = urlString.indexOf('://');
 62 |       if (protocolSeparator !== -1) {
 63 |         this.protocol = urlString.substring(0, protocolSeparator + 1); // includes the colon
 64 |         urlString = urlString.substring(protocolSeparator + 3); // rest after ://
 65 |       } else {
 66 |         throw new Error('Invalid URL format: missing protocol (e.g., "mysql://")');
 67 |       }
 68 | 
 69 |       // Extract query params if any
 70 |       const questionMarkIndex: number = urlString.indexOf('?');
 71 |       let queryParams: string = '';
 72 |       if (questionMarkIndex !== -1) {
 73 |         queryParams = urlString.substring(questionMarkIndex + 1);
 74 |         urlString = urlString.substring(0, questionMarkIndex);
 75 | 
 76 |         // Parse query parameters
 77 |         queryParams.split('&').forEach(pair => {
 78 |           const parts: string[] = pair.split('=');
 79 |           if (parts.length === 2 && parts[0] && parts[1]) {
 80 |             this.searchParams.set(parts[0], decodeURIComponent(parts[1]));
 81 |           }
 82 |         });
 83 |       }
 84 | 
 85 |       // Extract authentication
 86 |       const atIndex: number = urlString.indexOf('@');
 87 |       if (atIndex !== -1) {
 88 |         const auth: string = urlString.substring(0, atIndex);
 89 |         urlString = urlString.substring(atIndex + 1);
 90 | 
 91 |         // Split into username and password
 92 |         const colonIndex: number = auth.indexOf(':');
 93 |         if (colonIndex !== -1) {
 94 |           this.username = auth.substring(0, colonIndex);
 95 |           this.password = auth.substring(colonIndex + 1);
 96 |           
 97 |           // Decode username and password
 98 |           this.username = decodeURIComponent(this.username);
 99 |           this.password = decodeURIComponent(this.password);
100 |         } else {
101 |           this.username = decodeURIComponent(auth);
102 |         }
103 |       }
104 | 
105 |       // Extract pathname
106 |       const pathSeparatorIndex: number = urlString.indexOf('/');
107 |       if (pathSeparatorIndex !== -1) {
108 |         this.pathname = urlString.substring(pathSeparatorIndex);
109 |         urlString = urlString.substring(0, pathSeparatorIndex);
110 |       }
111 | 
112 |       // Extract hostname and port
113 |       const colonIndex: number = urlString.indexOf(':');
114 |       if (colonIndex !== -1) {
115 |         this.hostname = urlString.substring(0, colonIndex);
116 |         this.port = urlString.substring(colonIndex + 1);
117 |       } else {
118 |         this.hostname = urlString;
119 |       }
120 |       
121 |       // Additional validation
122 |       if (this.protocol === '') {
123 |         throw new Error('Invalid URL: protocol is required');
124 |       }
125 |     } catch (error) {
126 |       throw new Error(`Failed to parse URL: ${error instanceof Error ? error.message : String(error)}`);
127 |     }
128 |   }
129 | 
130 |   /**
131 |    * Helper method to safely get a parameter from query string
132 |    * 
133 |    * @param name - The parameter name to retrieve
134 |    * @returns The parameter value or null if not found
135 |    */
136 |   getSearchParam(name: string): string | null {
137 |     return this.searchParams.has(name) ? this.searchParams.get(name) as string : null;
138 |   }
139 | 
140 |   /**
141 |    * Helper method to iterate over all parameters
142 |    * 
143 |    * @param callback - Function to call for each parameter
144 |    */
145 |   forEachSearchParam(callback: (value: string, key: string) => void): void {
146 |     this.searchParams.forEach((value, key) => callback(value, key));
147 |   }
148 | }
149 | 
```

--------------------------------------------------------------------------------
/src/utils/ssh-tunnel.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { Client, ConnectConfig } from 'ssh2';
  2 | import { readFileSync } from 'fs';
  3 | import { Server, createServer } from 'net';
  4 | import type { SSHTunnelConfig, SSHTunnelOptions, SSHTunnelInfo } from '../types/ssh.js';
  5 | 
  6 | /**
  7 |  * SSH Tunnel implementation for secure database connections
  8 |  */
  9 | export class SSHTunnel {
 10 |   private sshClient: Client | null = null;
 11 |   private localServer: Server | null = null;
 12 |   private tunnelInfo: SSHTunnelInfo | null = null;
 13 |   private isConnected: boolean = false;
 14 | 
 15 |   /**
 16 |    * Establish an SSH tunnel
 17 |    * @param config SSH connection configuration
 18 |    * @param options Tunnel options including target host and port
 19 |    * @returns Promise resolving to tunnel information including local port
 20 |    */
 21 |   async establish(
 22 |     config: SSHTunnelConfig, 
 23 |     options: SSHTunnelOptions
 24 |   ): Promise<SSHTunnelInfo> {
 25 |     if (this.isConnected) {
 26 |       throw new Error('SSH tunnel is already established');
 27 |     }
 28 | 
 29 |     return new Promise((resolve, reject) => {
 30 |       this.sshClient = new Client();
 31 | 
 32 |       // Build SSH connection config
 33 |       const sshConfig: ConnectConfig = {
 34 |         host: config.host,
 35 |         port: config.port || 22,
 36 |         username: config.username,
 37 |       };
 38 | 
 39 |       // Configure authentication
 40 |       if (config.password) {
 41 |         sshConfig.password = config.password;
 42 |       } else if (config.privateKey) {
 43 |         try {
 44 |           const privateKey = readFileSync(config.privateKey);
 45 |           sshConfig.privateKey = privateKey;
 46 |           if (config.passphrase) {
 47 |             sshConfig.passphrase = config.passphrase;
 48 |           }
 49 |         } catch (error) {
 50 |           reject(new Error(`Failed to read private key file: ${error instanceof Error ? error.message : String(error)}`));
 51 |           return;
 52 |         }
 53 |       } else {
 54 |         reject(new Error('Either password or privateKey must be provided for SSH authentication'));
 55 |         return;
 56 |       }
 57 | 
 58 |       // Handle SSH connection errors
 59 |       this.sshClient.on('error', (err) => {
 60 |         this.cleanup();
 61 |         reject(new Error(`SSH connection error: ${err.message}`));
 62 |       });
 63 | 
 64 |       // When SSH connection is ready, create the tunnel
 65 |       this.sshClient.on('ready', () => {
 66 |         console.error('SSH connection established');
 67 | 
 68 |         // Create local server for the tunnel
 69 |         this.localServer = createServer((localSocket) => {
 70 |           this.sshClient!.forwardOut(
 71 |             '127.0.0.1',
 72 |             0,
 73 |             options.targetHost,
 74 |             options.targetPort,
 75 |             (err, stream) => {
 76 |               if (err) {
 77 |                 console.error('SSH forward error:', err);
 78 |                 localSocket.end();
 79 |                 return;
 80 |               }
 81 | 
 82 |               // Pipe data between local socket and SSH stream
 83 |               localSocket.pipe(stream).pipe(localSocket);
 84 | 
 85 |               // Handle stream errors
 86 |               stream.on('error', (err) => {
 87 |                 console.error('SSH stream error:', err);
 88 |                 localSocket.end();
 89 |               });
 90 | 
 91 |               localSocket.on('error', (err) => {
 92 |                 console.error('Local socket error:', err);
 93 |                 stream.end();
 94 |               });
 95 |             }
 96 |           );
 97 |         });
 98 | 
 99 |         // Listen on local port
100 |         const localPort = options.localPort || 0;
101 |         this.localServer.listen(localPort, '127.0.0.1', () => {
102 |           const address = this.localServer!.address();
103 |           if (!address || typeof address === 'string') {
104 |             this.cleanup();
105 |             reject(new Error('Failed to get local server address'));
106 |             return;
107 |           }
108 | 
109 |           this.tunnelInfo = {
110 |             localPort: address.port,
111 |             targetHost: options.targetHost,
112 |             targetPort: options.targetPort,
113 |           };
114 | 
115 |           this.isConnected = true;
116 |           console.error(`SSH tunnel established: localhost:${address.port} -> ${options.targetHost}:${options.targetPort}`);
117 |           resolve(this.tunnelInfo);
118 |         });
119 | 
120 |         // Handle local server errors
121 |         this.localServer.on('error', (err) => {
122 |           this.cleanup();
123 |           reject(new Error(`Local server error: ${err.message}`));
124 |         });
125 |       });
126 | 
127 |       // Connect to SSH server
128 |       this.sshClient.connect(sshConfig);
129 |     });
130 |   }
131 | 
132 |   /**
133 |    * Close the SSH tunnel and clean up resources
134 |    */
135 |   async close(): Promise<void> {
136 |     if (!this.isConnected) {
137 |       return;
138 |     }
139 | 
140 |     return new Promise((resolve) => {
141 |       this.cleanup();
142 |       this.isConnected = false;
143 |       console.error('SSH tunnel closed');
144 |       resolve();
145 |     });
146 |   }
147 | 
148 |   /**
149 |    * Clean up resources
150 |    */
151 |   private cleanup(): void {
152 |     if (this.localServer) {
153 |       this.localServer.close();
154 |       this.localServer = null;
155 |     }
156 | 
157 |     if (this.sshClient) {
158 |       this.sshClient.end();
159 |       this.sshClient = null;
160 |     }
161 | 
162 |     this.tunnelInfo = null;
163 |   }
164 | 
165 |   /**
166 |    * Get current tunnel information
167 |    */
168 |   getTunnelInfo(): SSHTunnelInfo | null {
169 |     return this.tunnelInfo;
170 |   }
171 | 
172 |   /**
173 |    * Check if tunnel is connected
174 |    */
175 |   getIsConnected(): boolean {
176 |     return this.isConnected;
177 |   }
178 | }
```
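
End to end, the tunnel is established before the database client connects and torn down afterwards. A sketch with illustrative hosts and key path:

```typescript
import { SSHTunnel } from "./ssh-tunnel.js";

async function main() {
  const tunnel = new SSHTunnel();
  try {
    // Forward a random local port to the database as seen from the bastion.
    const info = await tunnel.establish(
      {
        host: "bastion.example.com",
        port: 22,
        username: "ubuntu",
        privateKey: "/home/user/.ssh/id_rsa",
      },
      { targetHost: "db.internal", targetPort: 5432 }
    );

    // Point the database client at the local end of the tunnel.
    console.log(`Connect via postgres://user:password@127.0.0.1:${info.localPort}/dbname`);
  } finally {
    await tunnel.close();
  }
}

main().catch(console.error);
```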

--------------------------------------------------------------------------------
/src/utils/__tests__/ssh-config-parser.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach } from 'vitest';
  2 | import { parseSSHConfig, looksLikeSSHAlias } from '../ssh-config-parser.js';
  3 | import { mkdtempSync, writeFileSync, rmSync } from 'fs';
  4 | import { tmpdir } from 'os';
  5 | import { join } from 'path';
  6 | 
  7 | describe('SSH Config Parser', () => {
  8 |   let tempDir: string;
  9 |   let configPath: string;
 10 | 
 11 |   beforeEach(() => {
 12 |     // Create a temporary directory for test config files
 13 |     tempDir = mkdtempSync(join(tmpdir(), 'dbhub-ssh-test-'));
 14 |     configPath = join(tempDir, 'config');
 15 |   });
 16 | 
 17 |   afterEach(() => {
 18 |     // Clean up temporary directory
 19 |     rmSync(tempDir, { recursive: true });
 20 |   });
 21 | 
 22 |   describe('parseSSHConfig', () => {
 23 |     it('should parse basic SSH config', () => {
 24 |       const configContent = `
 25 | Host myserver
 26 |   HostName 192.168.1.100
 27 |   User johndoe
 28 |   Port 2222
 29 | `;
 30 |       writeFileSync(configPath, configContent);
 31 | 
 32 |       const result = parseSSHConfig('myserver', configPath);
 33 |       expect(result).toEqual({
 34 |         host: '192.168.1.100',
 35 |         username: 'johndoe',
 36 |         port: 2222
 37 |       });
 38 |     });
 39 | 
 40 |     it('should handle identity file', () => {
 41 |       const identityPath = join(tempDir, 'id_rsa');
 42 |       writeFileSync(identityPath, 'fake-key-content');
 43 |       
 44 |       const configContent = `
 45 | Host dev-server
 46 |   HostName dev.example.com
 47 |   User developer
 48 |   IdentityFile ${identityPath}
 49 | `;
 50 |       writeFileSync(configPath, configContent);
 51 | 
 52 |       const result = parseSSHConfig('dev-server', configPath);
 53 |       expect(result).toEqual({
 54 |         host: 'dev.example.com',
 55 |         username: 'developer',
 56 |         privateKey: identityPath
 57 |       });
 58 |     });
 59 | 
 60 |     it('should handle multiple identity files and use the first one', () => {
 61 |       const identityPath1 = join(tempDir, 'id_rsa');
 62 |       const identityPath2 = join(tempDir, 'id_ed25519');
 63 |       writeFileSync(identityPath1, 'fake-key-1');
 64 |       writeFileSync(identityPath2, 'fake-key-2');
 65 |       
 66 |       const configContent = `
 67 | Host multi-key
 68 |   HostName multi.example.com
 69 |   User multiuser
 70 |   IdentityFile ${identityPath1}
 71 |   IdentityFile ${identityPath2}
 72 | `;
 73 |       writeFileSync(configPath, configContent);
 74 | 
 75 |       const result = parseSSHConfig('multi-key', configPath);
 76 |       expect(result?.privateKey).toBe(identityPath1);
 77 |     });
 78 | 
 79 |     it('should handle wildcard patterns', () => {
 80 |       const configContent = `
 81 | Host *.example.com
 82 |   User defaultuser
 83 |   Port 2222
 84 | 
 85 | Host prod.example.com
 86 |   HostName 10.0.0.100
 87 | `;
 88 |       writeFileSync(configPath, configContent);
 89 | 
 90 |       const result = parseSSHConfig('prod.example.com', configPath);
 91 |       expect(result).toEqual({
 92 |         host: '10.0.0.100',
 93 |         username: 'defaultuser',
 94 |         port: 2222
 95 |       });
 96 |     });
 97 | 
 98 |     it('should use host alias as hostname if HostName not specified', () => {
 99 |       const configContent = `
100 | Host myalias
101 |   User testuser
102 | `;
103 |       writeFileSync(configPath, configContent);
104 | 
105 |       const result = parseSSHConfig('myalias', configPath);
106 |       expect(result).toEqual({
107 |         host: 'myalias',
108 |         username: 'testuser'
109 |       });
110 |     });
111 | 
112 |     it('should return null for non-existent host', () => {
113 |       const configContent = `
114 | Host myserver
115 |   HostName 192.168.1.100
116 |   User johndoe
117 | `;
118 |       writeFileSync(configPath, configContent);
119 | 
120 |       const result = parseSSHConfig('nonexistent', configPath);
121 |       expect(result).toBeNull();
122 |     });
123 | 
124 |     it('should return null if config file does not exist', () => {
125 |       const result = parseSSHConfig('myserver', '/non/existent/path');
126 |       expect(result).toBeNull();
127 |     });
128 | 
129 |     it('should return null if required fields are missing', () => {
130 |       const configContent = `
131 | Host incomplete
132 |   HostName 192.168.1.100
133 | `;
134 |       writeFileSync(configPath, configContent);
135 | 
136 |       const result = parseSSHConfig('incomplete', configPath);
137 |       expect(result).toBeNull();
138 |     });
139 | 
140 |     it('should handle tilde expansion in identity file', () => {
141 |       // Note: uses an absolute mock path, so this verifies the IdentityFile plumbing rather than tilde expansion itself
142 |       const mockKeyPath = join(tempDir, 'mock_id_rsa');
143 |       writeFileSync(mockKeyPath, 'fake-key');
144 |       
145 |       const configContent = `
146 | Host tilde-test
147 |   HostName tilde.example.com
148 |   User tildeuser
149 |   IdentityFile ${mockKeyPath}
150 | `;
151 |       writeFileSync(configPath, configContent);
152 | 
153 |       const result = parseSSHConfig('tilde-test', configPath);
154 |       expect(result?.privateKey).toBe(mockKeyPath);
155 |     });
156 |   });
157 | 
158 |   describe('looksLikeSSHAlias', () => {
159 |     it('should return true for simple hostnames', () => {
160 |       expect(looksLikeSSHAlias('myserver')).toBe(true);
161 |       expect(looksLikeSSHAlias('dev-box')).toBe(true);
162 |       expect(looksLikeSSHAlias('prod_server')).toBe(true);
163 |     });
164 | 
165 |     it('should return false for domains', () => {
166 |       expect(looksLikeSSHAlias('example.com')).toBe(false);
167 |       expect(looksLikeSSHAlias('sub.example.com')).toBe(false);
168 |       expect(looksLikeSSHAlias('my.local.dev')).toBe(false);
169 |     });
170 | 
171 |     it('should return false for IP addresses', () => {
172 |       expect(looksLikeSSHAlias('192.168.1.1')).toBe(false);
173 |       expect(looksLikeSSHAlias('10.0.0.1')).toBe(false);
174 |       expect(looksLikeSSHAlias('::1')).toBe(false);
175 |       expect(looksLikeSSHAlias('2001:db8::1')).toBe(false);
176 |     });
177 |   });
178 | });
```

--------------------------------------------------------------------------------
/src/connectors/manager.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { Connector, ConnectorType, ConnectorRegistry, ExecuteOptions } from "./interface.js";
  2 | import { SSHTunnel } from "../utils/ssh-tunnel.js";
  3 | import { resolveSSHConfig, resolveMaxRows } from "../config/env.js";
  4 | import type { SSHTunnelConfig } from "../types/ssh.js";
  5 | 
  6 | // Singleton instance for global access
  7 | let managerInstance: ConnectorManager | null = null;
  8 | 
  9 | /**
 10 |  * Manages database connectors and provides a unified interface to work with them
 11 |  */
 12 | export class ConnectorManager {
 13 |   private activeConnector: Connector | null = null;
 14 |   private connected = false;
 15 |   private sshTunnel: SSHTunnel | null = null;
 16 |   private originalDSN: string | null = null;
 17 |   private maxRows: number | null = null;
 18 | 
 19 |   constructor() {
 20 |     if (!managerInstance) {
 21 |       managerInstance = this;
 22 |     }
 23 |     
 24 |     // Initialize maxRows from command line arguments
 25 |     const maxRowsData = resolveMaxRows();
 26 |     if (maxRowsData) {
 27 |       this.maxRows = maxRowsData.maxRows;
 28 |       console.error(`Max rows limit: ${this.maxRows} (from ${maxRowsData.source})`);
 29 |     }
 30 |   }
 31 | 
 32 |   /**
 33 |    * Initialize and connect to the database using a DSN
 34 |    */
 35 |   async connectWithDSN(dsn: string, initScript?: string): Promise<void> {
 36 |     // Store original DSN for reference
 37 |     this.originalDSN = dsn;
 38 |     
 39 |     // Check if SSH tunnel is needed
 40 |     const sshConfig = resolveSSHConfig();
 41 |     let actualDSN = dsn;
 42 |     
 43 |     if (sshConfig) {
 44 |       console.error(`SSH tunnel configuration loaded from ${sshConfig.source}`);
 45 |       
 46 |       // Parse DSN to get database host and port (uses the WHATWG URL parser, so special characters here must be percent-encoded, unlike with SafeURL)
 47 |       const url = new URL(dsn);
 48 |       const targetHost = url.hostname;
 49 |       const targetPort = parseInt(url.port, 10) || this.getDefaultPort(dsn);
 50 |       
 51 |       // Create and establish SSH tunnel
 52 |       this.sshTunnel = new SSHTunnel();
 53 |       const tunnelInfo = await this.sshTunnel.establish(sshConfig.config, {
 54 |         targetHost,
 55 |         targetPort,
 56 |       });
 57 |       
 58 |       // Update DSN to use local tunnel endpoint
 59 |       url.hostname = '127.0.0.1';
 60 |       url.port = tunnelInfo.localPort.toString();
 61 |       actualDSN = url.toString();
 62 |       
 63 |       console.error(`Database connection will use SSH tunnel through localhost:${tunnelInfo.localPort}`);
 64 |     }
 65 | 
 66 |     // First try to find a connector that can handle this DSN
 67 |     let connector = ConnectorRegistry.getConnectorForDSN(actualDSN);
 68 | 
 69 |     if (!connector) {
 70 |       throw new Error(`No connector found that can handle the DSN: ${actualDSN}`);
 71 |     }
 72 | 
 73 |     this.activeConnector = connector;
 74 | 
 75 |     // Connect to the database through tunnel if applicable
 76 |     await this.activeConnector.connect(actualDSN, initScript);
 77 |     this.connected = true;
 78 |   }
 79 | 
 80 |   /**
 81 |    * Initialize and connect to the database using a specific connector type
 82 |    */
 83 |   async connectWithType(connectorType: ConnectorType, dsn?: string): Promise<void> {
 84 |     // Get the connector from the registry
 85 |     const connector = ConnectorRegistry.getConnector(connectorType);
 86 | 
 87 |     if (!connector) {
 88 |       throw new Error(`Connector "${connectorType}" not found`);
 89 |     }
 90 | 
 91 |     this.activeConnector = connector;
 92 | 
 93 |     // Use provided DSN or get sample DSN
 94 |     const connectionString = dsn || connector.dsnParser.getSampleDSN();
 95 | 
 96 |     // Connect to the database
 97 |     await this.activeConnector.connect(connectionString);
 98 |     this.connected = true;
 99 |   }
100 | 
101 |   /**
102 |    * Close the database connection
103 |    */
104 |   async disconnect(): Promise<void> {
105 |     if (this.activeConnector && this.connected) {
106 |       await this.activeConnector.disconnect();
107 |       this.connected = false;
108 |     }
109 |     
110 |     // Close SSH tunnel if it exists
111 |     if (this.sshTunnel) {
112 |       await this.sshTunnel.close();
113 |       this.sshTunnel = null;
114 |     }
115 |     
116 |     this.originalDSN = null;
117 |   }
118 | 
119 |   /**
120 |    * Get the active connector
121 |    */
122 |   getConnector(): Connector {
123 |     if (!this.activeConnector) {
124 |       throw new Error("No active connector. Call connectWithDSN() or connectWithType() first.");
125 |     }
126 |     return this.activeConnector;
127 |   }
128 | 
129 |   /**
130 |    * Check if there's an active connection
131 |    */
132 |   isConnected(): boolean {
133 |     return this.connected;
134 |   }
135 | 
136 |   /**
137 |    * Get all available connector types
138 |    */
139 |   static getAvailableConnectors(): ConnectorType[] {
140 |     return ConnectorRegistry.getAvailableConnectors();
141 |   }
142 | 
143 |   /**
144 |    * Get sample DSNs for all available connectors
145 |    */
146 |   static getAllSampleDSNs(): { [key in ConnectorType]?: string } {
147 |     return ConnectorRegistry.getAllSampleDSNs();
148 |   }
149 | 
150 |   /**
151 |    * Get the current active connector instance
152 |    * This is used by resource and tool handlers
153 |    */
154 |   static getCurrentConnector(): Connector {
155 |     if (!managerInstance) {
156 |       throw new Error("ConnectorManager not initialized");
157 |     }
158 |     return managerInstance.getConnector();
159 |   }
160 | 
161 |   /**
162 |    * Get execute options for SQL execution
163 |    */
164 |   getExecuteOptions(): ExecuteOptions {
165 |     const options: ExecuteOptions = {};
166 |     if (this.maxRows !== null) {
167 |       options.maxRows = this.maxRows;
168 |     }
169 |     return options;
170 |   }
171 | 
172 |   /**
173 |    * Get the current execute options
174 |    * This is used by tool handlers
175 |    */
176 |   static getCurrentExecuteOptions(): ExecuteOptions {
177 |     if (!managerInstance) {
178 |       throw new Error("ConnectorManager not initialized");
179 |     }
180 |     return managerInstance.getExecuteOptions();
181 |   }
182 |   
183 |   /**
184 |    * Get default port for a database based on DSN protocol
185 |    */
186 |   private getDefaultPort(dsn: string): number {
187 |     if (dsn.startsWith('postgres://') || dsn.startsWith('postgresql://')) {
188 |       return 5432;
189 |     } else if (dsn.startsWith('mysql://')) {
190 |       return 3306;
191 |     } else if (dsn.startsWith('mariadb://')) {
192 |       return 3306;
193 |     } else if (dsn.startsWith('sqlserver://')) {
194 |       return 1433;
195 |     }
196 |     // SQLite doesn't use ports
197 |     return 0;
198 |   }
199 | }
200 | 
```
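
Putting it together, a sketch of the manager lifecycle using only the methods shown above (the DSN is illustrative; the SSH tunnel is set up automatically when `--ssh-host`/`SSH_HOST` style options are present):

```typescript
import { ConnectorManager } from "./manager.js";

async function main() {
  const manager = new ConnectorManager();

  // The registry picks a connector from the DSN scheme; if SSH options are
  // configured, the DSN is transparently rewritten to go through the tunnel.
  await manager.connectWithDSN("postgres://user:password@localhost:5432/dbname");
  try {
    const connector = manager.getConnector();
    console.log("schemas:", await connector.getSchemas());
    console.log("tables:", await connector.getTables());
    console.log("execute options:", manager.getExecuteOptions());
  } finally {
    await manager.disconnect();
  }
}

main().catch(console.error);
```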

--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------

```yaml
  1 | # Workflow for publishing the DBHub package to npm
  2 | # This workflow has two trigger modes:
  3 | #
  4 | # 1. Manual trigger (workflow_dispatch):
  5 | #    - Allows manually specifying version and tag
  6 | #    - Useful for deliberate releases
  7 | #
  8 | # 2. Automatic trigger (on push to main branch that modifies package.json):
  9 | #    - Detects if the version has changed
 10 | #    - Automatically determines the appropriate npm tag based on version format
 11 | #    - Skips publishing if the version already exists on npm
 12 | 
 13 | name: Publish to npm
 14 | 
 15 | on:
 16 |   # Manual trigger with customizable version and tag
 17 |   workflow_dispatch:
 18 |     inputs:
 19 |       version:
 20 |         description: "Version to publish (e.g., 0.1.0, 0.2.0-beta)"
 21 |         required: false
 22 |         default: ""
 23 |       tag:
 24 |         description: "NPM tag (e.g., latest, dev)"
 25 |         required: false
 26 |         default: "dev"
 27 |   
 28 |   # Automatic trigger when package.json changes in main branch
 29 |   push:
 30 |     branches:
 31 |       - main
 32 |     paths:
 33 |       - 'package.json'
 34 | 
 35 | jobs:
 36 |   build-and-publish:
 37 |     runs-on: ubuntu-latest
 38 |     steps:
 39 |       # Checkout the repository to get access to the code
 40 |       - name: Checkout repository
 41 |         uses: actions/checkout@v4
 42 | 
 43 |       # Set up Node.js with npm registry configuration
 44 |       - name: Setup Node.js
 45 |         uses: actions/setup-node@v4
 46 |         with:
 47 |           node-version: "22"
 48 |           registry-url: "https://registry.npmjs.org/" # Use the public npm registry
 49 |           scope: "@bytebase"                          # Set the npm scope for publishing
 50 | 
 51 |       # Install pnpm for faster and more reliable package management
 52 |       - name: Install pnpm
 53 |         uses: pnpm/action-setup@v3
 54 |         with:
 55 |           version: latest
 56 | 
 57 |       # Install project dependencies
 58 |       - name: Install dependencies
 59 |         run: pnpm install
 60 | 
 61 |       # Build the project (compile TypeScript to JavaScript)
 62 |       - name: Build
 63 |         run: pnpm run build
 64 | 
 65 |       # Determine if we need to publish and what version/tag to use
 66 |       - name: Check version and prepare for publishing
 67 |         run: |
 68 |           # Get current version from package.json
 69 |           CURRENT_VERSION=$(jq -r '.version' package.json)
 70 |           
 71 |           # CASE 1: Manual workflow trigger with specified version
 72 |           if [ -n "${{ inputs.version }}" ]; then
 73 |             VERSION="${{ inputs.version }}"
 74 |             TAG="${{ inputs.tag }}"
 75 |             SHOULD_PUBLISH="true"
 76 |             echo "Manual trigger: Using provided version ${VERSION} with tag ${TAG}"
 77 |           
 78 |           # CASE 2: Automatic trigger from package.json changes
 79 |           else
 80 |             VERSION="${CURRENT_VERSION}"
 81 |             
 82 |             # Check if this version already exists in npm registry to avoid duplicates
 83 |             if npm view @bytebase/dbhub@${VERSION} version &> /dev/null; then
 84 |               echo "Version ${VERSION} already exists in npm registry. Skipping publish."
 85 |               SHOULD_PUBLISH="false"
 86 |             else
 87 |               echo "Version ${VERSION} is new. Proceeding with publish."
 88 |               SHOULD_PUBLISH="true"
 89 |               
 90 |               # Determine appropriate npm tag based on version format:
 91 |               # - For prerelease versions like "0.1.0-beta", use "beta" as the tag
 92 |               # - For stable versions like "1.0.0", use "latest" as the tag
 93 |               if [[ "${VERSION}" == *"-"* ]]; then
 94 |                 # Extract tag from version string (e.g., "beta" from "0.1.0-beta")
 95 |                 TAG=$(echo "${VERSION}" | cut -d'-' -f2 | cut -d'.' -f1)
 96 |                 echo "Prerelease version detected. Using '${TAG}' npm tag."
 97 |               else
 98 |                 TAG="latest"
 99 |                 echo "Stable version detected. Using 'latest' npm tag."
100 |               fi
101 |             fi
102 |           fi
103 |           
104 |           # Store values as environment variables for use in later steps
105 |           echo "PACKAGE_VERSION=${VERSION}" >> $GITHUB_ENV
106 |           echo "NPM_TAG=${TAG}" >> $GITHUB_ENV
107 |           echo "SHOULD_PUBLISH=${SHOULD_PUBLISH}" >> $GITHUB_ENV
108 |           
109 |           # Summary message
110 |           if [ "${SHOULD_PUBLISH}" = "true" ]; then
111 |             echo "Publishing version: ${VERSION} with tag: ${TAG}"
112 |           fi
113 | 
114 |           # Only modify package.json if we're going to publish
115 |           if [ "${SHOULD_PUBLISH}" = "true" ]; then
116 |             # Step 1: Update package name and version
117 |             echo "Preparing package.json for publishing..."
118 |             jq --arg version "$VERSION" '.name = "@bytebase/dbhub" | .version = $version' package.json > package.json.tmp
119 |             mv package.json.tmp package.json
120 |             
121 |             # Step 2: Configure which files to include in the published package
122 |             echo "Setting files to include in the npm package..."
123 |             jq '.files = ["dist/**/*", "LICENSE", "README.md"]' package.json > package.json.tmp
124 |             mv package.json.tmp package.json
125 | 
126 |             # Step 3: Add binary entry for CLI usage (makes it executable with 'npx' or after global install)
127 |             echo "Adding bin entry for CLI usage..."
128 |             jq '.bin = {"dbhub": "dist/index.js"}' package.json > package.json.tmp 
129 |             mv package.json.tmp package.json
130 |             
131 |             echo "Package.json prepared successfully for publishing"
132 |           else
133 |             echo "Skipping package.json modifications as we won't be publishing"
134 |           fi
135 | 
136 |       # Publish the package to npm if conditions are met
137 |       - name: Publish to npm
138 |         if: env.SHOULD_PUBLISH == 'true'
139 |         run: |
140 |           echo "Publishing @bytebase/dbhub@${{ env.PACKAGE_VERSION }} with tag ${{ env.NPM_TAG }}..."
141 |           pnpm publish --no-git-checks --access public --tag ${{ env.NPM_TAG }}
142 |           echo "✅ Successfully published to npm!"
143 |         env:
144 |           # Uses NPM_TOKEN from repository secrets for authentication
145 |           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
146 |           
147 |       # Display a message when skipping publication
148 |       - name: Skip publishing
149 |         if: env.SHOULD_PUBLISH != 'true'
150 |         run: |
151 |           echo "⏭️ Skipping publish step because:"
152 |           echo "   - Version has not changed, or"
153 |           echo "   - Version already exists in the npm registry"
154 |           echo "To force publication, use the manual workflow trigger with a custom version."
155 | 
```
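
For manual releases, the `workflow_dispatch` path can be started from the GitHub Actions UI or, assuming an authenticated GitHub CLI, with something like `gh workflow run npm-publish.yml -f version=0.2.0-beta -f tag=beta`.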

--------------------------------------------------------------------------------
/src/connectors/interface.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Type definition for supported database connector types
  3 |  */
  4 | export type ConnectorType = "postgres" | "mysql" | "mariadb" | "sqlite" | "sqlserver";
  5 | 
  6 | /**
  7 |  * Database Connector Interface
  8 |  * This defines the contract that all database connectors must implement.
  9 |  */
 10 | export interface SQLResult {
 11 |   rows: any[];
 12 |   [key: string]: any;
 13 | }
 14 | 
 15 | export interface TableColumn {
 16 |   column_name: string;
 17 |   data_type: string;
 18 |   is_nullable: string;
 19 |   column_default: string | null;
 20 | }
 21 | 
 22 | export interface TableIndex {
 23 |   index_name: string;
 24 |   column_names: string[];
 25 |   is_unique: boolean;
 26 |   is_primary: boolean;
 27 | }
 28 | 
 29 | export interface StoredProcedure {
 30 |   procedure_name: string;
 31 |   procedure_type: "procedure" | "function";
 32 |   language: string;
 33 |   parameter_list: string;
 34 |   return_type?: string;
 35 |   definition?: string;
 36 | }
 37 | 
 38 | /**
 39 |  * Options for SQL execution
 40 |  * This interface allows passing execution-specific options to connectors
 41 |  */
 42 | export interface ExecuteOptions {
 43 |   /** Maximum number of rows to return (applied via database-native LIMIT) */
 44 |   maxRows?: number;
 45 | }
 46 | 
 47 | /**
 48 |  * Connection string (DSN) parser interface
 49 |  * Each connector needs to implement its own DSN parser
 50 |  */
 51 | export interface DSNParser {
 52 |   /**
 53 |    * Parse a connection string into connector-specific configuration
 54 |    * Example DSN formats:
 55 |    * - PostgreSQL: "postgres://user:password@localhost:5432/dbname?sslmode=disable"
 56 |    * - MariaDB: "mariadb://user:password@localhost:3306/dbname"
 57 |    * - MySQL: "mysql://user:password@localhost:3306/dbname"
 58 |    * - SQLite: "sqlite:///path/to/database.db" or "sqlite:///:memory:"
 59 |    */
 60 |   parse(dsn: string): Promise<any>;
 61 | 
 62 |   /**
 63 |    * Generate a sample DSN string for this connector type
 64 |    */
 65 |   getSampleDSN(): string;
 66 | 
 67 |   /**
 68 |    * Check if a DSN is valid for this connector
 69 |    */
 70 |   isValidDSN(dsn: string): boolean;
 71 | }
 72 | 
 73 | export interface Connector {
 74 |   /** A unique identifier for the connector */
 75 |   id: ConnectorType;
 76 | 
 77 |   /** Human-readable name of the connector */
 78 |   name: string;
 79 | 
 80 |   /** DSN parser for this connector */
 81 |   dsnParser: DSNParser;
 82 | 
 83 |   /** Connect to the database using DSN, with optional init script */
 84 |   connect(dsn: string, initScript?: string): Promise<void>;
 85 | 
 86 |   /** Close the connection */
 87 |   disconnect(): Promise<void>;
 88 | 
 89 |   /**
 90 |    * Get all schemas in the database
 91 |    * @returns Promise with array of schema names
 92 |    */
 93 |   getSchemas(): Promise<string[]>;
 94 | 
 95 |   /**
 96 |    * Get all tables in the database or in a specific schema
 97 |    * @param schema Optional schema name. If not provided, implementation should use the default schema:
 98 |    *   - PostgreSQL: 'public' schema
 99 |    *   - SQL Server: 'dbo' schema
100 |    *   - MySQL: Current active database from connection (DATABASE())
101 |    *   - SQLite: Main database (schema concept doesn't exist in SQLite)
102 |    * @returns Promise with array of table names
103 |    */
104 |   getTables(schema?: string): Promise<string[]>;
105 | 
106 |   /**
107 |    * Get schema information for a specific table
108 |    * @param tableName The name of the table to get schema information for
109 |    * @param schema Optional schema name. If not provided, implementation should use the default schema
110 |    *   as described in getTables method.
111 |    * @returns Promise with array of column information
112 |    */
113 |   getTableSchema(tableName: string, schema?: string): Promise<TableColumn[]>;
114 | 
115 |   /**
116 |    * Check if a table exists
117 |    * @param tableName The name of the table to check
118 |    * @param schema Optional schema name. If not provided, implementation should use the default schema
119 |    *   as described in getTables method.
120 |    * @returns Promise with boolean indicating if table exists
121 |    */
122 |   tableExists(tableName: string, schema?: string): Promise<boolean>;
123 | 
124 |   /**
125 |    * Get indexes for a specific table
126 |    * @param tableName The name of the table to get indexes for
127 |    * @param schema Optional schema name. If not provided, implementation should use the default schema
128 |    *   as described in getTables method.
129 |    * @returns Promise with array of index information
130 |    */
131 |   getTableIndexes(tableName: string, schema?: string): Promise<TableIndex[]>;
132 | 
133 |   /**
134 |    * Get stored procedures/functions in the database or in a specific schema
135 |    * @param schema Optional schema name. If not provided, implementation should use the default schema
136 |    * @returns Promise with array of stored procedure/function names
137 |    */
138 |   getStoredProcedures(schema?: string): Promise<string[]>;
139 | 
140 |   /**
141 |    * Get details for a specific stored procedure/function
142 |    * @param procedureName The name of the procedure/function to get details for
143 |    * @param schema Optional schema name. If not provided, implementation should use the default schema
144 |    * @returns Promise with stored procedure details
145 |    */
146 |   getStoredProcedureDetail(procedureName: string, schema?: string): Promise<StoredProcedure>;
147 | 
148 |   /** Execute a SQL query with execution options */
149 |   executeSQL(sql: string, options: ExecuteOptions): Promise<SQLResult>;
150 | }
151 | 
152 | /**
153 |  * Registry for available database connectors
154 |  */
155 | export class ConnectorRegistry {
156 |   private static connectors: Map<ConnectorType, Connector> = new Map();
157 | 
158 |   /**
159 |    * Register a new connector
160 |    */
161 |   static register(connector: Connector): void {
162 |     ConnectorRegistry.connectors.set(connector.id, connector);
163 |   }
164 | 
165 |   /**
166 |    * Get a connector by ID
167 |    */
168 |   static getConnector(id: ConnectorType): Connector | null {
169 |     return ConnectorRegistry.connectors.get(id) || null;
170 |   }
171 | 
172 |   /**
173 |    * Get connector for a DSN string
174 |    * Tries to find a connector that can handle the given DSN format
175 |    */
176 |   static getConnectorForDSN(dsn: string): Connector | null {
177 |     for (const connector of ConnectorRegistry.connectors.values()) {
178 |       if (connector.dsnParser.isValidDSN(dsn)) {
179 |         return connector;
180 |       }
181 |     }
182 |     return null;
183 |   }
184 | 
185 |   /**
186 |    * Get all available connector IDs
187 |    */
188 |   static getAvailableConnectors(): ConnectorType[] {
189 |     return Array.from(ConnectorRegistry.connectors.keys());
190 |   }
191 | 
192 |   /**
193 |    * Get sample DSN for a specific connector
194 |    */
195 |   static getSampleDSN(connectorType: ConnectorType): string | null {
196 |     const connector = ConnectorRegistry.getConnector(connectorType);
197 |     if (!connector) return null;
198 |     return connector.dsnParser.getSampleDSN();
199 |   }
200 | 
201 |   /**
202 |    * Get all available sample DSNs
203 |    */
204 |   static getAllSampleDSNs(): { [key in ConnectorType]?: string } {
205 |     const samples: { [key in ConnectorType]?: string } = {};
206 |     for (const [id, connector] of ConnectorRegistry.connectors.entries()) {
207 |       samples[id] = connector.dsnParser.getSampleDSN();
208 |     }
209 |     return samples;
210 |   }
211 | }
212 | 
```
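The `ConnectorRegistry` above is a static registry: connectors register themselves, and `getConnectorForDSN` walks the registered parsers until one accepts the DSN. The sketch below shows that flow end to end. It assumes, as the repository layout suggests, that each connector module under `src/connectors/<db>/` calls `ConnectorRegistry.register(...)` as a side effect of being imported; treat it as an illustrative sketch rather than the repository's actual entry point.

```typescript
import { ConnectorRegistry } from "./interface";

// Assumption: importing a connector module registers it as a side effect,
// e.g. ConnectorRegistry.register(new PostgresConnector()) in its module body.
import "./postgres";

const dsn = "postgres://user:password@localhost:5432/dbname?sslmode=disable";

// Find the first registered connector whose DSN parser accepts this string.
const connector = ConnectorRegistry.getConnectorForDSN(dsn);
if (!connector) {
  const available = ConnectorRegistry.getAvailableConnectors().join(", ");
  throw new Error(`No connector accepts this DSN (available: ${available})`);
}

await connector.connect(dsn);
try {
  // maxRows is applied by the connector through a database-native LIMIT,
  // per the ExecuteOptions contract above.
  const result = await connector.executeSQL("SELECT 1 AS ok", { maxRows: 10 });
  console.log(result.rows);
} finally {
  await connector.disconnect();
}
```

Returning `null` rather than throwing from `getConnector` and `getConnectorForDSN` keeps DSN probing cheap for callers that want to fall back to a sample DSN via `getAllSampleDSNs()`.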