# db-mcp-server — repository snapshot

This is page 1 of 7 (53/102 files, ~49,583 tokens). Use http://codebase.md/freepeak/db-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cm
│   └── gitstream.cm
├── .cursor
│   ├── mcp-example.json
│   ├── mcp.json
│   └── rules
│       └── global.mdc
├── .dockerignore
├── .DS_Store
├── .env.example
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── go.yml
├── .gitignore
├── .golangci.yml
├── assets
│   └── logo.svg
├── CHANGELOG.md
├── cmd
│   └── server
│       └── main.go
├── commit-message.txt
├── config.json
├── config.timescaledb-test.json
├── docker-compose.mcp-test.yml
├── docker-compose.test.yml
├── docker-compose.timescaledb-test.yml
├── docker-compose.yml
├── docker-wrapper.sh
├── Dockerfile
├── docs
│   ├── REFACTORING.md
│   ├── TIMESCALEDB_FUNCTIONS.md
│   ├── TIMESCALEDB_IMPLEMENTATION.md
│   ├── TIMESCALEDB_PRD.md
│   └── TIMESCALEDB_TOOLS.md
├── examples
│   └── postgres_connection.go
├── glama.json
├── go.mod
├── go.sum
├── init-scripts
│   └── timescaledb
│       ├── 01-init.sql
│       ├── 02-sample-data.sql
│       ├── 03-continuous-aggregates.sql
│       └── README.md
├── internal
│   ├── config
│   │   ├── config_test.go
│   │   └── config.go
│   ├── delivery
│   │   └── mcp
│   │       ├── compression_policy_test.go
│   │       ├── context
│   │       │   ├── hypertable_schema_test.go
│   │       │   ├── timescale_completion_test.go
│   │       │   ├── timescale_context_test.go
│   │       │   └── timescale_query_suggestion_test.go
│   │       ├── mock_test.go
│   │       ├── response_test.go
│   │       ├── response.go
│   │       ├── retention_policy_test.go
│   │       ├── server_wrapper.go
│   │       ├── timescale_completion.go
│   │       ├── timescale_context.go
│   │       ├── timescale_schema.go
│   │       ├── timescale_tool_test.go
│   │       ├── timescale_tool.go
│   │       ├── timescale_tools_test.go
│   │       ├── tool_registry.go
│   │       └── tool_types.go
│   ├── domain
│   │   └── database.go
│   ├── logger
│   │   ├── logger_test.go
│   │   └── logger.go
│   ├── repository
│   │   └── database_repository.go
│   └── usecase
│       └── database_usecase.go
├── LICENSE
├── Makefile
├── pkg
│   ├── core
│   │   ├── core.go
│   │   └── logging.go
│   ├── db
│   │   ├── db_test.go
│   │   ├── db.go
│   │   ├── manager.go
│   │   ├── README.md
│   │   └── timescale
│   │       ├── config_test.go
│   │       ├── config.go
│   │       ├── connection_test.go
│   │       ├── connection.go
│   │       ├── continuous_aggregate_test.go
│   │       ├── continuous_aggregate.go
│   │       ├── hypertable_test.go
│   │       ├── hypertable.go
│   │       ├── metadata.go
│   │       ├── mocks_test.go
│   │       ├── policy_test.go
│   │       ├── policy.go
│   │       ├── query.go
│   │       ├── timeseries_test.go
│   │       └── timeseries.go
│   ├── dbtools
│   │   ├── db_helpers.go
│   │   ├── dbtools_test.go
│   │   ├── dbtools.go
│   │   ├── exec.go
│   │   ├── performance_test.go
│   │   ├── performance.go
│   │   ├── query.go
│   │   ├── querybuilder_test.go
│   │   ├── querybuilder.go
│   │   ├── README.md
│   │   ├── schema_test.go
│   │   ├── schema.go
│   │   ├── tx_test.go
│   │   └── tx.go
│   ├── internal
│   │   └── logger
│   │       └── logger.go
│   ├── jsonrpc
│   │   └── jsonrpc.go
│   ├── logger
│   │   └── logger.go
│   └── tools
│       └── tools.go
├── README-old.md
├── README.md
├── repomix-output.txt
├── request.json
├── start-mcp.sh
├── test.Dockerfile
├── timescaledb-test.sh
└── wait-for-it.sh
```

# Files

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Specific binary files
 2 | server
 3 | server-linux
 4 | multidb
 5 | multidb-linux
 6 | /bin/*
 7 | 
 8 | # Build & test output
 9 | *.out
10 | *.test
11 | coverage.out
12 | 
13 | # Dependency directories
14 | vendor/
15 | 
16 | # Environment files
17 | .env
18 | .env.*
19 | !.env.example
20 | 
21 | # Log files
22 | logs/
23 | *.log
24 | 
25 | # OS specific files
26 | .DS_Store
27 | .idea/
28 | .vscode/
29 | 
```

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------

```
 1 | # Git files
 2 | .git
 3 | .gitignore
 4 | 
 5 | # Build artifacts
 6 | mcp-server
 7 | mcp-client
 8 | mcp-simple-client
 9 | 
10 | # Development environment files
11 | .env
12 | 
13 | # Editor files
14 | .vscode
15 | .idea
16 | 
17 | # Test files
18 | *_test.go
19 | *.test
20 | 
21 | # Database files
22 | *.db
23 | 
24 | # Documentation
25 | README.md
26 | docs/
27 | LICENSE
28 | 
29 | # OS specific
30 | .DS_Store
31 | Thumbs.db 
```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
 1 | # Server Configuration
 2 | SERVER_PORT=9090
 3 | TRANSPORT_MODE=sse
 4 | # Logging configuration
 5 | LOG_LEVEL=info
 6 | 
 7 | # Legacy Single Database Configuration (optional)
 8 | DB_TYPE=mysql
 9 | DB_HOST=localhost
10 | DB_PORT=3306
11 | DB_USER=user
12 | DB_PASSWORD=password
13 | DB_NAME=dbname
14 | 
15 | # Multi-Database Configuration
16 | DB_CONFIG_FILE=config.json
17 | 
18 | # Additional Settings
19 | DEBUG=true
20 | 
21 | # Note: Create a copy of this file as .env and modify it with your own values 
```

--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------

```yaml
 1 | run:
 2 |   timeout: 5m
 3 |   modules-download-mode: readonly
 4 |   allow-parallel-runners: true
 5 | 
 6 | linters:
 7 |   disable-all: true
 8 |   enable:
 9 |     - errcheck
10 |     - gosimple
11 |     - govet
12 |     - ineffassign
13 |     - staticcheck
14 |     - unused
15 |     - gofmt
16 |     - goimports
17 |     - misspell
18 |     - revive
19 | 
20 | linters-settings:
21 |   gofmt:
22 |     simplify: true
23 |   goimports:
24 |     local-prefixes: github.com/FreePeak/db-mcp-server
25 |   govet:
26 |   revive:
27 |     rules:
28 |       - name: var-naming
29 |         severity: warning
30 |         disabled: false
31 |       - name: exported
32 |         severity: warning
33 |         disabled: false
34 |   errcheck:
35 |     # Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
36 |     check-type-assertions: true
37 |     # Report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`.
38 |     check-blank: true
39 |     # List of functions to exclude from error checking (useful for os.Setenv)
40 |     exclude-functions:
41 |       - os.Setenv
42 | 
43 | issues:
44 |   exclude-use-default: false
45 |   max-issues-per-linter: 0
46 |   max-same-issues: 0
47 |   exclude-dirs:
48 |     - vendor/
49 |   exclude:
50 |     - "exported \\w+ (\\S*['.]*)([a-zA-Z'.*]*) should have comment or be unexported"
51 |   # Excluding the specific errors that are reported but that we can't reproduce locally  
52 |   exclude-rules:
53 |     - path: internal/delivery/mcp/tool_registry.go
54 |       text: "Error return value of `tr.registerTool` is not checked"
55 |     - path: internal/delivery/mcp/tool_registry.go  
56 |       text: "Error return value of `tr.createToolAlias` is not checked"
57 |     - path: cmd/server/main.go
58 |       text: "Error return value of `os.Setenv` is not checked"
59 |     - path: _test\.go$
60 |       linters:
61 |         - errcheck
62 |     - path: pkg/dbtools/dbtools.go
63 |       text: "func `_loadConfigFromFile` is unused"
64 |       linters:
65 |         - unused
66 |     - path: pkg/dbtools/dbtools.go
67 |       text: "func `_getEnv` is unused"
68 |       linters:
69 |         - unused
70 |     - path: pkg/dbtools/dbtools.go
71 |       text: "func `_getIntEnv` is unused"
72 |       linters:
73 |         - unused
74 |     - path: pkg/dbtools/dbtools.go
75 |       text: "func `_loadConfigFromEnv` is unused"
76 |       linters:
77 |         - unused 
```

--------------------------------------------------------------------------------
/init-scripts/timescaledb/README.md:
--------------------------------------------------------------------------------

```markdown
 1 | # TimescaleDB Test Environment
 2 | 
 3 | This directory contains initialization scripts for setting up a TimescaleDB test environment with sample data and structures for testing the DB-MCP-Server TimescaleDB integration.
 4 | 
 5 | ## Overview
 6 | 
 7 | The initialization scripts in this directory are executed automatically when the TimescaleDB Docker container starts up. They set up:
 8 | 
 9 | 1. Required extensions and schemas
10 | 2. Sample tables and hypertables for various time-series data types
11 | 3. Sample data with realistic patterns
12 | 4. Continuous aggregates, compression policies, and retention policies
13 | 5. Test users with different permission levels
14 | 
15 | ## Scripts
16 | 
17 | The scripts are executed in alphabetical order:
18 | 
19 | - **01-init.sql**: Creates the TimescaleDB extension, test schema, tables, hypertables, and test users
20 | - **02-sample-data.sql**: Populates the tables with sample time-series data
21 | - **03-continuous-aggregates.sql**: Creates continuous aggregates, compression, and retention policies
22 | 
23 | ## Test Data Overview
24 | 
25 | The test environment includes the following sample datasets:
26 | 
27 | 1. **sensor_readings**: Simulated IoT sensor data with temperature, humidity, pressure readings
28 | 2. **weather_observations**: Weather station data with temperature, precipitation, wind readings
29 | 3. **device_metrics**: System monitoring data with CPU, memory, network metrics
30 | 4. **stock_prices**: Financial time-series data with OHLC price data
31 | 5. **multi_partition_data**: Data with both time and space partitioning
32 | 6. **regular_table**: Non-hypertable for comparison testing
33 | 
34 | ## Test Users
35 | 
36 | - **timescale_user**: Main admin user (password: timescale_password)
37 | - **test_readonly**: Read-only access user (password: readonly_password)  
38 | - **test_readwrite**: Read-write access user (password: readwrite_password)
39 | 
40 | ## Usage
41 | 
42 | This test environment is automatically set up when running:
43 | 
44 | ```
45 | ./timescaledb-test.sh start
46 | ```
47 | 
48 | You can access the database directly:
49 | 
50 | ```
51 | psql postgresql://timescale_user:timescale_password@localhost:15435/timescale_test
52 | ```
53 | 
54 | Or through the MCP server:
55 | 
56 | ```
57 | http://localhost:9093
58 | ```
59 | 
60 | ## Available Databases in MCP Server
61 | 
62 | - **timescaledb_test**: Full admin access via timescale_user
63 | - **timescaledb_readonly**: Read-only access via test_readonly user
64 | - **timescaledb_readwrite**: Read-write access via test_readwrite user 
```

--------------------------------------------------------------------------------
/pkg/db/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Database Package
  2 | 
  3 | This package provides a unified database interface that works with both MySQL and PostgreSQL databases, including PostgreSQL 17. It handles connection management, pooling, and query execution.
  4 | 
  5 | ## Features
  6 | 
  7 | - Unified interface for MySQL and PostgreSQL (all versions)
  8 | - Comprehensive PostgreSQL connection options for compatibility with all versions
  9 | - Connection pooling with configurable parameters
 10 | - Context-aware query execution with timeout support
 11 | - Transaction support
 12 | - Proper error handling
 13 | 
 14 | ## PostgreSQL Version Compatibility
 15 | 
 16 | This package is designed to be compatible with all PostgreSQL versions, including:
 17 | - PostgreSQL 10+
 18 | - PostgreSQL 14+
 19 | - PostgreSQL 15+
 20 | - PostgreSQL 16+
 21 | - PostgreSQL 17
 22 | 
 23 | The connection string builder automatically adapts to specific PostgreSQL version requirements.
 24 | 
 25 | ## Configuration Options
 26 | 
 27 | ### Basic Configuration
 28 | 
 29 | Configure the database connection using the `Config` struct:
 30 | 
 31 | ```go
 32 | cfg := db.Config{
 33 |     Type:            "mysql", // or "postgres"
 34 |     Host:            "localhost",
 35 |     Port:            3306,
 36 |     User:            "user",
 37 |     Password:        "password",
 38 |     Name:            "dbname",
 39 |     MaxOpenConns:    25,
 40 |     MaxIdleConns:    5,
 41 |     ConnMaxLifetime: 5 * time.Minute,
 42 |     ConnMaxIdleTime: 5 * time.Minute,
 43 | }
 44 | ```
 45 | 
 46 | ### PostgreSQL-Specific Options
 47 | 
 48 | For PostgreSQL databases, additional options are available:
 49 | 
 50 | ```go
 51 | cfg := db.Config{
 52 |     Type:            "postgres",
 53 |     Host:            "localhost",
 54 |     Port:            5432,
 55 |     User:            "user",
 56 |     Password:        "password",
 57 |     Name:            "dbname",
 58 |     
 59 |     // PostgreSQL-specific options
 60 |     SSLMode:          db.SSLPrefer,                   // SSL mode (disable, prefer, require, verify-ca, verify-full)
 61 |     SSLCert:          "/path/to/client-cert.pem",     // Client certificate file
 62 |     SSLKey:           "/path/to/client-key.pem",      // Client key file
 63 |     SSLRootCert:      "/path/to/root-cert.pem",       // Root certificate file
 64 |     ApplicationName:  "myapp",                        // Application name for pg_stat_activity
 65 |     ConnectTimeout:   10,                             // Connection timeout in seconds
 66 |     TargetSessionAttrs: "any",                        // For load balancing (any, read-write, read-only, primary, standby)
 67 |     
 68 |     // Additional connection parameters
 69 |     Options: map[string]string{
 70 |         "client_encoding": "UTF8",
 71 |         "timezone":        "UTC",
 72 |     },
 73 |     
 74 |     // Connection pool settings
 75 |     MaxOpenConns:    25,
 76 |     MaxIdleConns:    5,
 77 |     ConnMaxLifetime: 5 * time.Minute,
 78 |     ConnMaxIdleTime: 5 * time.Minute,
 79 | }
 80 | ```
 81 | 
 82 | ### JSON Configuration
 83 | 
 84 | When using JSON configuration files, the PostgreSQL options are specified as follows:
 85 | 
 86 | ```json
 87 | {
 88 |   "id": "postgres17",
 89 |   "type": "postgres",
 90 |   "host": "postgres17",
 91 |   "port": 5432,
 92 |   "name": "mydb",
 93 |   "user": "postgres",
 94 |   "password": "password",
 95 |   "ssl_mode": "prefer",
 96 |   "application_name": "myapp",
 97 |   "connect_timeout": 15,
 98 |   "target_session_attrs": "any",
 99 |   "options": {
100 |     "application_name": "myapp",
101 |     "client_encoding": "UTF8"
102 |   },
103 |   "max_open_conns": 25,
104 |   "max_idle_conns": 5,
105 |   "conn_max_lifetime_seconds": 300,
106 |   "conn_max_idle_time_seconds": 60
107 | }
108 | ```
109 | 
110 | ## Usage Examples
111 | 
112 | ### Connecting to the Database
113 | 
114 | ```go
115 | // Create a new database instance
116 | database, err := db.NewDatabase(cfg)
117 | if err != nil {
118 |     log.Fatalf("Failed to create database instance: %v", err)
119 | }
120 | 
121 | // Connect to the database
122 | if err := database.Connect(); err != nil {
123 |     log.Fatalf("Failed to connect to database: %v", err)
124 | }
125 | defer database.Close()
126 | ```
127 | 
128 | ### Executing Queries
129 | 
130 | ```go
131 | // Context with timeout
132 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
133 | defer cancel()
134 | 
135 | // Execute a query that returns rows
136 | rows, err := database.Query(ctx, "SELECT id, name FROM users WHERE age > $1", 18)
137 | if err != nil {
138 |     log.Fatalf("Query failed: %v", err)
139 | }
140 | defer rows.Close()
141 | 
142 | // Process rows
143 | for rows.Next() {
144 |     var id int
145 |     var name string
146 |     if err := rows.Scan(&id, &name); err != nil {
147 |         log.Printf("Failed to scan row: %v", err)
148 |         continue
149 |     }
150 |     fmt.Printf("User: %d - %s\n", id, name)
151 | }
152 | 
153 | if err = rows.Err(); err != nil {
154 |     log.Printf("Error during row iteration: %v", err)
155 | }
156 | ```
157 | 
158 | ### Using the Database Manager
159 | 
160 | ```go
161 | // Create a database manager
162 | manager := db.NewDBManager()
163 | 
164 | // Load configuration from JSON
165 | configJSON, err := os.ReadFile("config.json")
166 | if err != nil {
167 |     log.Fatalf("Failed to read config file: %v", err)
168 | }
169 | 
170 | if err := manager.LoadConfig(configJSON); err != nil {
171 |     log.Fatalf("Failed to load database config: %v", err)
172 | }
173 | 
174 | // Connect to all databases
175 | if err := manager.Connect(); err != nil {
176 |     log.Fatalf("Failed to connect to databases: %v", err)
177 | }
178 | defer manager.CloseAll()
179 | 
180 | // Get a specific database connection
181 | postgres17, err := manager.GetDatabase("postgres17")
182 | if err != nil {
183 |     log.Fatalf("Failed to get database: %v", err)
184 | }
185 | 
186 | // Use the database
187 | // ...
188 | ```
189 | 
190 | ## PostgreSQL 17 Support
191 | 
192 | This package fully supports PostgreSQL 17 by:
193 | 
194 | 1. Using connection string parameters compatible with PostgreSQL 17
195 | 2. Supporting all PostgreSQL 17 connection options including TLS/SSL modes
196 | 3. Properly handling connection pool management
197 | 4. Working with both older and newer versions of PostgreSQL on the same codebase 
```

--------------------------------------------------------------------------------
/pkg/dbtools/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Database Tools Package
  2 | 
  3 | This package provides tools for interacting with databases in the MCP Server. It exposes database functionality as MCP tools that can be invoked by clients.
  4 | 
  5 | ## Features
  6 | 
  7 | - Database query tool for executing SELECT statements
  8 | - Database execute tool for executing non-query statements (INSERT, UPDATE, DELETE)
  9 | - Transaction management tool for executing multiple statements atomically
 10 | - Schema explorer tool for auto-discovering database structure and relationships
 11 | - Performance analyzer tool for identifying slow queries and optimization opportunities
 12 | - Support for both MySQL and PostgreSQL databases
 13 | - Parameterized queries to prevent SQL injection
 14 | - Connection pooling for optimal performance
 15 | - Timeouts for preventing long-running queries
 16 | 
 17 | ## Available Tools
 18 | 
 19 | ### 1. Database Query Tool (`dbQuery`)
 20 | 
 21 | Executes a SQL query and returns the results.
 22 | 
 23 | **Parameters:**
 24 | - `query` (string, required): SQL query to execute
 25 | - `params` (array): Parameters for prepared statements
 26 | - `timeout` (integer): Query timeout in milliseconds (default: 5000)
 27 | 
 28 | **Example:**
 29 | ```json
 30 | {
 31 |   "query": "SELECT id, name, email FROM users WHERE status = ? AND created_at > ?",
 32 |   "params": ["active", "2023-01-01T00:00:00Z"],
 33 |   "timeout": 10000
 34 | }
 35 | ```
 36 | 
 37 | **Returns:**
 38 | ```json
 39 | {
 40 |   "rows": [
 41 |     {"id": 1, "name": "John", "email": "[email protected]"},
 42 |     {"id": 2, "name": "Jane", "email": "[email protected]"}
 43 |   ],
 44 |   "count": 2,
 45 |   "query": "SELECT id, name, email FROM users WHERE status = ? AND created_at > ?",
 46 |   "params": ["active", "2023-01-01T00:00:00Z"]
 47 | }
 48 | ```
 49 | 
 50 | ### 2. Database Execute Tool (`dbExecute`)
 51 | 
 52 | Executes a SQL statement that doesn't return results (INSERT, UPDATE, DELETE).
 53 | 
 54 | **Parameters:**
 55 | - `statement` (string, required): SQL statement to execute
 56 | - `params` (array): Parameters for prepared statements
 57 | - `timeout` (integer): Execution timeout in milliseconds (default: 5000)
 58 | 
 59 | **Example:**
 60 | ```json
 61 | {
 62 |   "statement": "INSERT INTO users (name, email, status) VALUES (?, ?, ?)",
 63 |   "params": ["Alice", "[email protected]", "active"],
 64 |   "timeout": 10000
 65 | }
 66 | ```
 67 | 
 68 | **Returns:**
 69 | ```json
 70 | {
 71 |   "rowsAffected": 1,
 72 |   "lastInsertId": 3,
 73 |   "statement": "INSERT INTO users (name, email, status) VALUES (?, ?, ?)",
 74 |   "params": ["Alice", "[email protected]", "active"]
 75 | }
 76 | ```
 77 | 
 78 | ### 3. Database Transaction Tool (`dbTransaction`)
 79 | 
 80 | Manages database transactions for executing multiple statements atomically.
 81 | 
 82 | **Parameters:**
 83 | - `action` (string, required): Action to perform (begin, commit, rollback, execute)
 84 | - `transactionId` (string): Transaction ID (returned from begin, required for all other actions)
 85 | - `statement` (string): SQL statement to execute (required for execute action)
 86 | - `params` (array): Parameters for the statement
 87 | - `readOnly` (boolean): Whether the transaction is read-only (for begin action)
 88 | - `timeout` (integer): Timeout in milliseconds (default: 30000)
 89 | 
 90 | **Example - Begin Transaction:**
 91 | ```json
 92 | {
 93 |   "action": "begin",
 94 |   "readOnly": false,
 95 |   "timeout": 60000
 96 | }
 97 | ```
 98 | 
 99 | **Returns:**
100 | ```json
101 | {
102 |   "transactionId": "tx-1625135848693",
103 |   "readOnly": false,
104 |   "status": "active"
105 | }
106 | ```
107 | 
108 | **Example - Execute in Transaction:**
109 | ```json
110 | {
111 |   "action": "execute",
112 |   "transactionId": "tx-1625135848693",
113 |   "statement": "UPDATE accounts SET balance = balance - ? WHERE id = ?",
114 |   "params": [100.00, 123]
115 | }
116 | ```
117 | 
118 | **Example - Commit Transaction:**
119 | ```json
120 | {
121 |   "action": "commit",
122 |   "transactionId": "tx-1625135848693"
123 | }
124 | ```
125 | 
126 | **Returns:**
127 | ```json
128 | {
129 |   "transactionId": "tx-1625135848693",
130 |   "status": "committed"
131 | }
132 | ```
133 | 
134 | ### 4. Database Schema Explorer Tool (`dbSchema`)
135 | 
136 | Auto-discovers database structure and relationships, including tables, columns, and foreign keys.
137 | 
138 | **Parameters:**
139 | - `component` (string, required): Schema component to explore (tables, columns, relationships, or full)
140 | - `table` (string): Table name (required when component is 'columns' and optional for 'relationships')
141 | - `timeout` (integer): Query timeout in milliseconds (default: 10000)
142 | 
143 | **Example - Get All Tables:**
144 | ```json
145 | {
146 |   "component": "tables"
147 | }
148 | ```
149 | 
150 | **Returns:**
151 | ```json
152 | {
153 |   "tables": [
154 |     {
155 |       "name": "users",
156 |       "type": "BASE TABLE",
157 |       "engine": "InnoDB",
158 |       "estimated_row_count": 1500,
159 |       "create_time": "2023-01-15T10:30:45Z",
160 |       "update_time": "2023-06-20T14:15:30Z"
161 |     },
162 |     {
163 |       "name": "orders",
164 |       "type": "BASE TABLE",
165 |       "engine": "InnoDB",
166 |       "estimated_row_count": 8750,
167 |       "create_time": "2023-01-15T10:35:12Z",
168 |       "update_time": "2023-06-25T09:40:18Z"
169 |     }
170 |   ],
171 |   "count": 2,
172 |   "type": "mysql"
173 | }
174 | ```
175 | 
176 | **Example - Get Table Columns:**
177 | ```json
178 | {
179 |   "component": "columns",
180 |   "table": "users"
181 | }
182 | ```
183 | 
184 | **Returns:**
185 | ```json
186 | {
187 |   "table": "users",
188 |   "columns": [
189 |     {
190 |       "name": "id",
191 |       "type": "int(11)",
192 |       "nullable": "NO",
193 |       "key": "PRI",
194 |       "extra": "auto_increment",
195 |       "default": null,
196 |       "max_length": null,
197 |       "numeric_precision": 10,
198 |       "numeric_scale": 0,
199 |       "comment": "User unique identifier"
200 |     },
201 |     {
202 |       "name": "email",
203 |       "type": "varchar(255)",
204 |       "nullable": "NO",
205 |       "key": "UNI",
206 |       "extra": "",
207 |       "default": null,
208 |       "max_length": 255,
209 |       "numeric_precision": null,
210 |       "numeric_scale": null,
211 |       "comment": "User email address"
212 |     }
213 |   ],
214 |   "count": 2,
215 |   "type": "mysql"
216 | }
217 | ```
218 | 
219 | **Example - Get Relationships:**
220 | ```json
221 | {
222 |   "component": "relationships",
223 |   "table": "orders"
224 | }
225 | ```
226 | 
227 | **Returns:**
228 | ```json
229 | {
230 |   "relationships": [
231 |     {
232 |       "constraint_name": "fk_orders_users",
233 |       "table_name": "orders",
234 |       "column_name": "user_id",
235 |       "referenced_table_name": "users",
236 |       "referenced_column_name": "id",
237 |       "update_rule": "CASCADE",
238 |       "delete_rule": "RESTRICT"
239 |     }
240 |   ],
241 |   "count": 1,
242 |   "type": "mysql",
243 |   "table": "orders"
244 | }
245 | ```
246 | 
247 | **Example - Get Full Schema:**
248 | ```json
249 | {
250 |   "component": "full"
251 | }
252 | ```
253 | 
254 | **Returns:**
255 | A comprehensive schema including tables, columns, and relationships in a structured format.
256 | 
257 | ### 5. Database Performance Analyzer Tool (`dbPerformanceAnalyzer`)
258 | 
259 | Identifies slow queries and provides optimization suggestions for better performance.
260 | 
261 | **Parameters:**
262 | - `action` (string, required): Action to perform (getSlowQueries, getMetrics, analyzeQuery, reset, setThreshold)
263 | - `query` (string): SQL query to analyze (required for analyzeQuery action)
264 | - `threshold` (integer): Threshold in milliseconds for identifying slow queries (required for setThreshold action)
265 | - `limit` (integer): Maximum number of results to return (default: 10)
266 | 
267 | **Example - Get Slow Queries:**
268 | ```json
269 | {
270 |   "action": "getSlowQueries",
271 |   "limit": 5
272 | }
273 | ```
274 | 
275 | **Returns:**
276 | ```json
277 | {
278 |   "queries": [
279 |     {
280 |       "query": "SELECT * FROM orders JOIN order_items ON orders.id = order_items.order_id WHERE orders.status = 'pending'",
281 |       "count": 15,
282 |       "avgDuration": "750.25ms",
283 |       "minDuration": "520.50ms",
284 |       "maxDuration": "1250.75ms",
285 |       "totalDuration": "11253.75ms",
286 |       "lastExecuted": "2023-06-25T14:30:45Z"
287 |     },
288 |     {
289 |       "query": "SELECT * FROM users WHERE last_login > '2023-01-01'",
290 |       "count": 25,
291 |       "avgDuration": "650.30ms",
292 |       "minDuration": "450.20ms",
293 |       "maxDuration": "980.15ms",
294 |       "totalDuration": "16257.50ms",
295 |       "lastExecuted": "2023-06-25T14:15:22Z"
296 |     }
297 |   ],
298 |   "count": 2
299 | }
300 | ```
301 | 
302 | **Example - Analyze Query:**
303 | ```json
304 | {
305 |   "action": "analyzeQuery",
306 |   "query": "SELECT * FROM users JOIN orders ON users.id = orders.user_id WHERE orders.total > 100 ORDER BY users.name"
307 | }
308 | ```
309 | 
310 | **Returns:**
311 | ```json
312 | {
313 |   "query": "SELECT * FROM users JOIN orders ON users.id = orders.user_id WHERE orders.total > 100 ORDER BY users.name",
314 |   "suggestions": [
315 |     "Avoid using SELECT * - specify only the columns you need",
316 |     "Verify that ORDER BY columns are properly indexed"
317 |   ]
318 | }
319 | ```
320 | 
321 | **Example - Set Slow Query Threshold:**
322 | ```json
323 | {
324 |   "action": "setThreshold",
325 |   "threshold": 300
326 | }
327 | ```
328 | 
329 | **Returns:**
330 | ```json
331 | {
332 |   "success": true,
333 |   "message": "Slow query threshold updated",
334 |   "threshold": "300ms"
335 | }
336 | ```
337 | 
338 | **Example - Reset Performance Metrics:**
339 | ```json
340 | {
341 |   "action": "reset"
342 | }
343 | ```
344 | 
345 | **Returns:**
346 | ```json
347 | {
348 |   "success": true,
349 |   "message": "Performance metrics have been reset"
350 | }
351 | ```
352 | 
353 | **Example - Get All Query Metrics:**
354 | ```json
355 | {
356 |   "action": "getMetrics",
357 |   "limit": 3
358 | }
359 | ```
360 | 
361 | **Returns:**
362 | ```json
363 | {
364 |   "queries": [
365 |     {
366 |       "query": "SELECT id, name, email FROM users WHERE status = ?",
367 |       "count": 45,
368 |       "avgDuration": "12.35ms",
369 |       "minDuration": "5.20ms",
370 |       "maxDuration": "28.75ms",
371 |       "totalDuration": "555.75ms",
372 |       "lastExecuted": "2023-06-25T14:45:12Z"
373 |     },
374 |     {
375 |       "query": "SELECT * FROM orders WHERE user_id = ? AND created_at > ?",
376 |       "count": 30,
377 |       "avgDuration": "25.45ms",
378 |       "minDuration": "15.30ms",
379 |       "maxDuration": "45.80ms",
380 |       "totalDuration": "763.50ms",
381 |       "lastExecuted": "2023-06-25T14:40:18Z"
382 |     },
383 |     {
384 |       "query": "UPDATE users SET last_login = ? WHERE id = ?",
385 |       "count": 15,
386 |       "avgDuration": "18.25ms",
387 |       "minDuration": "10.50ms",
388 |       "maxDuration": "35.40ms",
389 |       "totalDuration": "273.75ms",
390 |       "lastExecuted": "2023-06-25T14:35:30Z"
391 |     }
392 |   ],
393 |   "count": 3
394 | }
395 | ```
396 | 
397 | ## Setup
398 | 
399 | To use these tools, initialize the database connection and register the tools:
400 | 
401 | ```go
402 | // Initialize database
403 | err := dbtools.InitDatabase(config)
404 | if err != nil {
405 |     log.Fatalf("Failed to initialize database: %v", err)
406 | }
407 | 
408 | // Register database tools
409 | dbtools.RegisterDatabaseTools(toolRegistry)
410 | ```
411 | 
412 | ## Error Handling
413 | 
414 | All tools return detailed error messages that indicate the specific issue. Common errors include:
415 | 
416 | - Database connection issues
417 | - Invalid SQL syntax
418 | - Transaction not found
419 | - Timeout errors
420 | - Permission errors
421 | 
422 | For transactions, always ensure you commit or rollback to avoid leaving transactions open. 
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | <div align="center">
  2 | 
  3 | <img src="assets/logo.svg" alt="DB MCP Server Logo" width="300" />
  4 | 
  5 | # Multi Database MCP Server
  6 | 
  7 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
  8 | [![Go Report Card](https://goreportcard.com/badge/github.com/FreePeak/db-mcp-server)](https://goreportcard.com/report/github.com/FreePeak/db-mcp-server)
  9 | [![Go Reference](https://pkg.go.dev/badge/github.com/FreePeak/db-mcp-server.svg)](https://pkg.go.dev/github.com/FreePeak/db-mcp-server)
 10 | [![Contributors](https://img.shields.io/github/contributors/FreePeak/db-mcp-server)](https://github.com/FreePeak/db-mcp-server/graphs/contributors)
 11 | 
 12 | <h3>A powerful multi-database server implementing the Model Context Protocol (MCP) to provide AI assistants with structured access to databases.</h3>
 13 | 
 14 | <div class="toc">
 15 |   <a href="#overview">Overview</a> •
 16 |   <a href="#core-concepts">Core Concepts</a> •
 17 |   <a href="#features">Features</a> •
 18 |   <a href="#supported-databases">Supported Databases</a> •
 19 |   <a href="#deployment-options">Deployment Options</a> •
 20 |   <a href="#configuration">Configuration</a> •
 21 |   <a href="#available-tools">Available Tools</a> •
 22 |   <a href="#examples">Examples</a> •
 23 |   <a href="#troubleshooting">Troubleshooting</a> •
 24 |   <a href="#contributing">Contributing</a>
 25 | </div>
 26 | 
 27 | </div>
 28 | 
 29 | ## Overview
 30 | 
 31 | The DB MCP Server provides a standardized way for AI models to interact with multiple databases simultaneously. Built on the [FreePeak/cortex](https://github.com/FreePeak/cortex) framework, it enables AI assistants to execute SQL queries, manage transactions, explore schemas, and analyze performance across different database systems through a unified interface.
 32 | 
 33 | ## Core Concepts
 34 | 
 35 | ### Multi-Database Support
 36 | 
 37 | Unlike traditional database connectors, DB MCP Server can connect to and interact with multiple databases concurrently:
 38 | 
 39 | ```json
 40 | {
 41 |   "connections": [
 42 |     {
 43 |       "id": "mysql1",
 44 |       "type": "mysql",
 45 |       "host": "localhost",
 46 |       "port": 3306,
 47 |       "name": "db1",
 48 |       "user": "user1",
 49 |       "password": "password1"
 50 |     },
 51 |     {
 52 |       "id": "postgres1",
 53 |       "type": "postgres",
 54 |       "host": "localhost",
 55 |       "port": 5432,
 56 |       "name": "db2",
 57 |       "user": "user2",
 58 |       "password": "password2"
 59 |     }
 60 |   ]
 61 | }
 62 | ```
 63 | 
 64 | ### Dynamic Tool Generation
 65 | 
 66 | For each connected database, the server automatically generates specialized tools:
 67 | 
 68 | ```go
 69 | // For a database with ID "mysql1", these tools are generated:
 70 | query_mysql1       // Execute SQL queries
 71 | execute_mysql1     // Run data modification statements
 72 | transaction_mysql1 // Manage transactions
 73 | schema_mysql1      // Explore database schema
 74 | performance_mysql1 // Analyze query performance
 75 | ```
 76 | 
 77 | ### Clean Architecture
 78 | 
 79 | The server follows Clean Architecture principles with these layers:
 80 | 
 81 | 1. **Domain Layer**: Core business entities and interfaces
 82 | 2. **Repository Layer**: Data access implementations
 83 | 3. **Use Case Layer**: Application business logic
 84 | 4. **Delivery Layer**: External interfaces (MCP tools)
 85 | 
 86 | ## Features
 87 | 
 88 | - **Simultaneous Multi-Database Support**: Connect to multiple MySQL and PostgreSQL databases concurrently
 89 | - **Database-Specific Tool Generation**: Auto-creates specialized tools for each connected database
 90 | - **Clean Architecture**: Modular design with clear separation of concerns
 91 | - **OpenAI Agents SDK Compatibility**: Full compatibility for seamless AI assistant integration
 92 | - **Dynamic Database Tools**: Execute queries, run statements, manage transactions, explore schemas, analyze performance
 93 | - **Unified Interface**: Consistent interaction patterns across different database types
 94 | - **Connection Management**: Simple configuration for multiple database connections
 95 | 
 96 | ## Supported Databases
 97 | 
 98 | | Database   | Status                    | Features                                                     |
 99 | | ---------- | ------------------------- | ------------------------------------------------------------ |
100 | | MySQL      | ✅ Full Support           | Queries, Transactions, Schema Analysis, Performance Insights |
101 | | PostgreSQL | ✅ Full Support (v9.6-17) | Queries, Transactions, Schema Analysis, Performance Insights |
102 | | TimescaleDB | ✅ Full Support          | Hypertables, Time-Series Queries, Continuous Aggregates, Compression, Retention Policies |
103 | 
104 | ## Deployment Options
105 | 
106 | The DB MCP Server can be deployed in multiple ways to suit different environments and integration needs:
107 | 
108 | ### Docker Deployment
109 | 
110 | ```bash
111 | # Pull the latest image
112 | docker pull freepeak/db-mcp-server:latest
113 | 
114 | # Run with mounted config file
115 | docker run -p 9092:9092 \
116 |   -v $(pwd)/config.json:/app/my-config.json \
117 |   -e TRANSPORT_MODE=sse \
118 |   -e CONFIG_PATH=/app/my-config.json \
119 |   freepeak/db-mcp-server
120 | ```
121 | 
122 | > **Note**: Mount to `/app/my-config.json` as the container has a default file at `/app/config.json`.
123 | 
124 | ### STDIO Mode (IDE Integration)
125 | 
126 | ```bash
127 | # Run the server in STDIO mode
128 | ./bin/server -t stdio -c config.json
129 | ```
130 | 
131 | For Cursor IDE integration, add to `.cursor/mcp.json`:
132 | 
133 | ```json
134 | {
135 |   "mcpServers": {
136 |     "stdio-db-mcp-server": {
137 |       "command": "/path/to/db-mcp-server/server",
138 |       "args": ["-t", "stdio", "-c", "/path/to/config.json"]
139 |     }
140 |   }
141 | }
142 | ```
143 | 
144 | ### SSE Mode (Server-Sent Events)
145 | 
146 | ```bash
147 | # Default configuration (localhost:9092)
148 | ./bin/server -t sse -c config.json
149 | 
150 | # Custom host and port
151 | ./bin/server -t sse -host 0.0.0.0 -port 8080 -c config.json
152 | ```
153 | 
154 | Client connection endpoint: `http://localhost:9092/sse`
155 | 
156 | ### Source Code Installation
157 | 
158 | ```bash
159 | # Clone the repository
160 | git clone https://github.com/FreePeak/db-mcp-server.git
161 | cd db-mcp-server
162 | 
163 | # Build the server
164 | make build
165 | 
166 | # Run the server
167 | ./bin/server -t sse -c config.json
168 | ```
169 | 
170 | ## Configuration
171 | 
172 | ### Database Configuration File
173 | 
174 | Create a `config.json` file with your database connections:
175 | 
176 | ```json
177 | {
178 |   "connections": [
179 |     {
180 |       "id": "mysql1",
181 |       "type": "mysql",
182 |       "host": "mysql1",
183 |       "port": 3306,
184 |       "name": "db1",
185 |       "user": "user1",
186 |       "password": "password1",
187 |       "query_timeout": 60,
188 |       "max_open_conns": 20,
189 |       "max_idle_conns": 5,
190 |       "conn_max_lifetime_seconds": 300,
191 |       "conn_max_idle_time_seconds": 60
192 |     },
193 |     {
194 |       "id": "postgres1",
195 |       "type": "postgres",
196 |       "host": "postgres1",
197 |       "port": 5432,
198 |       "name": "db1",
199 |       "user": "user1",
200 |       "password": "password1"
201 |     }
202 |   ]
203 | }
204 | ```
205 | 
206 | ### Command-Line Options
207 | 
208 | ```bash
209 | # Basic syntax
210 | ./bin/server -t <transport> -c <config-file>
211 | 
212 | # SSE transport options
213 | ./bin/server -t sse -host <hostname> -port <port> -c <config-file>
214 | 
215 | # Inline database configuration
216 | ./bin/server -t stdio -db-config '{"connections":[...]}'
217 | 
218 | # Environment variable configuration
219 | export DB_CONFIG='{"connections":[...]}'
220 | ./bin/server -t stdio
221 | ```
222 | 
223 | ## Available Tools
224 | 
225 | For each connected database, DB MCP Server automatically generates these specialized tools:
226 | 
227 | ### Query Tools
228 | 
229 | | Tool Name | Description |
230 | |-----------|-------------|
231 | | `query_<db_id>` | Execute SELECT queries and get results as a tabular dataset |
232 | | `execute_<db_id>` | Run data manipulation statements (INSERT, UPDATE, DELETE) |
233 | | `transaction_<db_id>` | Begin, commit, and rollback transactions |
234 | 
235 | ### Schema Tools
236 | 
237 | | Tool Name | Description |
238 | |-----------|-------------|
239 | | `schema_<db_id>` | Get information about tables, columns, indexes, and foreign keys |
240 | | `generate_schema_<db_id>` | Generate SQL or code from database schema |
241 | 
242 | ### Performance Tools
243 | 
244 | | Tool Name | Description |
245 | |-----------|-------------|
246 | | `performance_<db_id>` | Analyze query performance and get optimization suggestions |
247 | 
248 | ### TimescaleDB Tools
249 | 
250 | For PostgreSQL databases with TimescaleDB extension, these additional specialized tools are available:
251 | 
252 | | Tool Name | Description |
253 | |-----------|-------------|
254 | | `timescaledb_<db_id>` | Perform general TimescaleDB operations |
255 | | `create_hypertable_<db_id>` | Convert a standard table to a TimescaleDB hypertable |
256 | | `list_hypertables_<db_id>` | List all hypertables in the database |
257 | | `time_series_query_<db_id>` | Execute optimized time-series queries with bucketing |
258 | | `time_series_analyze_<db_id>` | Analyze time-series data patterns |
259 | | `continuous_aggregate_<db_id>` | Create materialized views that automatically update |
260 | | `refresh_continuous_aggregate_<db_id>` | Manually refresh continuous aggregates |
261 | 
262 | For detailed documentation on TimescaleDB tools, see [TIMESCALEDB_TOOLS.md](docs/TIMESCALEDB_TOOLS.md).
263 | 
264 | ## Examples
265 | 
266 | ### Querying Multiple Databases
267 | 
268 | ```sql
269 | -- Query the first database
270 | query_mysql1("SELECT * FROM users LIMIT 10")
271 | 
272 | -- Query the second database in the same context
273 | query_postgres1("SELECT * FROM products WHERE price > 100")
274 | ```
275 | 
276 | ### Managing Transactions
277 | 
278 | ```sql
279 | -- Start a transaction
280 | transaction_mysql1("BEGIN")
281 | 
282 | -- Execute statements within the transaction
283 | execute_mysql1("INSERT INTO orders (customer_id, product_id) VALUES (1, 2)")
284 | execute_mysql1("UPDATE inventory SET stock = stock - 1 WHERE product_id = 2")
285 | 
286 | -- Commit or rollback
287 | transaction_mysql1("COMMIT")
288 | -- OR
289 | transaction_mysql1("ROLLBACK")
290 | ```
291 | 
292 | ### Exploring Database Schema
293 | 
294 | ```sql
295 | -- Get all tables in the database
296 | schema_mysql1("tables")
297 | 
298 | -- Get columns for a specific table
299 | schema_mysql1("columns", "users")
300 | 
301 | -- Get constraints
302 | schema_mysql1("constraints", "orders")
303 | ```
304 | 
305 | ## Troubleshooting
306 | 
307 | ### Common Issues
308 | 
309 | - **Connection Failures**: Verify network connectivity and database credentials
310 | - **Permission Errors**: Ensure the database user has appropriate permissions
311 | - **Timeout Issues**: Check the `query_timeout` setting in your configuration
312 | 
313 | ### Logs
314 | 
315 | Enable verbose logging for troubleshooting:
316 | 
317 | ```bash
318 | ./bin/server -t sse -c config.json -v
319 | ```
320 | 
321 | ## Contributing
322 | 
323 | We welcome contributions to the DB MCP Server project! To contribute:
324 | 
325 | 1. Fork the repository
326 | 2. Create a feature branch (`git checkout -b feature/amazing-feature`)
327 | 3. Commit your changes (`git commit -m 'feat: add amazing feature'`)
328 | 4. Push to the branch (`git push origin feature/amazing-feature`)
329 | 5. Open a Pull Request
330 | 
331 | Please see our [CONTRIBUTING.md](docs/CONTRIBUTING.md) file for detailed guidelines.
332 | 
333 | ## License
334 | 
335 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
```

--------------------------------------------------------------------------------
/request.json:
--------------------------------------------------------------------------------

```json
1 | {"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}
2 | 
```

--------------------------------------------------------------------------------
/glama.json:
--------------------------------------------------------------------------------

```json
1 | {
2 |     "$schema": "https://glama.ai/mcp/schemas/server.json",
3 |     "maintainers": [
4 |       "linhdmn"
5 |     ]
6 |   }
```

--------------------------------------------------------------------------------
/pkg/core/core.go:
--------------------------------------------------------------------------------

```go
 1 | // Package core provides the core functionality of the MCP server.
 2 | package core
 3 | 
 4 | // Version returns the current version of the MCP server.
 5 | func Version() string {
 6 | 	return "1.0.0"
 7 | }
 8 | 
 9 | // Name returns the name of the package.
10 | func Name() string {
11 | 	return "db-mcp-server"
12 | }
13 | 
```

--------------------------------------------------------------------------------
/docker-compose.mcp-test.yml:
--------------------------------------------------------------------------------

```yaml
 1 | version: "3.8"
 2 | 
 3 | services:
 4 |   db-mcp-test:
 5 |     build: 
 6 |       context: .
 7 |       dockerfile: test.Dockerfile
 8 |     volumes:
 9 |       - ./config.json:/app/config.json
10 |     # For testing purposes
11 |     tty: true
12 |     stdin_open: true
13 |     # Use this for debugging
14 |     command: ["bash"]
15 |     # Use this for actual MCP proxy execution
16 |     # command: ["mcp-proxy","/app/server-linux","-t","stdio","-no-log","--stdio"] 
```

--------------------------------------------------------------------------------
/pkg/core/logging.go:
--------------------------------------------------------------------------------

```go
 1 | package core
 2 | 
 3 | import (
 4 | 	"io"
 5 | 	"os"
 6 | 	"strings"
 7 | )
 8 | 
 9 | // IsLoggingDisabled checks if MCP logging should be disabled
10 | func IsLoggingDisabled() bool {
11 | 	val := os.Getenv("MCP_DISABLE_LOGGING")
12 | 	return strings.ToLower(val) == "true" || val == "1"
13 | }
14 | 
15 | // GetLogWriter returns the appropriate writer for logging based on configuration
16 | func GetLogWriter() io.Writer {
17 | 	if IsLoggingDisabled() {
18 | 		return io.Discard
19 | 	}
20 | 	return os.Stderr
21 | }
22 | 
```

--------------------------------------------------------------------------------
/.cursor/mcp-example.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |     "mcpServers": {
 3 |         "demo-sse": {
 4 |             "url": "http://127.0.0.1:9092/sse"
 5 |         },
 6 |         "demo-stdio": {
 7 |             "command": "/Users/harvey/Work/dev/FreePeak/Opensource/db-mcp-server/server",
 8 |             "args": [
 9 |                 "-t",
10 |                 "stdio",
11 |                 "-c",
12 |                 "/Users/harvey/Work/dev/FreePeak/SaaS/cashflow-core/database_config.json"
13 |             ]
14 |         }
15 |     }
16 | }
```

--------------------------------------------------------------------------------
/docker-wrapper.sh:
--------------------------------------------------------------------------------

```bash
 1 | #!/bin/bash
 2 | 
 3 | # This wrapper script ensures proper STDIO handling for the MCP server in Docker
 4 | 
 5 | # Export required environment variables
 6 | export MCP_DISABLE_LOGGING=true
 7 | export DISABLE_LOGGING=true
 8 | export TRANSPORT_MODE=stdio
 9 | 
10 | # Create a log directory
11 | mkdir -p /tmp/logs
12 | 
13 | # Run the server with proper redirection
14 | # All stdout goes to the MCP proxy, while stderr goes to a file
15 | exec /app/multidb-linux -t stdio 2>/tmp/logs/server.log 
```

--------------------------------------------------------------------------------
/.cursor/mcp.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |     "mcpServers": {
 3 |         "multidb": {
 4 |             "command": "/Users/harvey/Work/dev/FreePeak/Opensource/db-mcp-server/bin/server",
 5 |             "args": [
 6 |                 "-t",
 7 |                 "stdio",
 8 |                 "-c",
 9 |                 "/Users/harvey/Work/dev/FreePeak/SaaS/CashFlow/cashflow-core/database_config.json"
10 |             ]
11 |         },
12 |         "multidb2": {
13 |             "url": "http://127.0.0.1:9090/sse"
14 |         },
15 |         "multidb3": {
16 |             "url": "http://127.0.0.1:9092/sse"
17 |         }
18 |     }
19 | }
```

--------------------------------------------------------------------------------
/commit-message.txt:
--------------------------------------------------------------------------------

```
 1 | Implement TimescaleDB compression policy tools (TOOL-4)
 2 | 
 3 | This commit implements the compression policy tools for TimescaleDB integration:
 4 | 
 5 | - Added Enable/Disable compression operations
 6 | - Added Add/Remove compression policy operations
 7 | - Added Get compression settings operation
 8 | - Added tests for all compression policy operations
 9 | - Updated TIMESCALEDB_IMPLEMENTATION.md to reflect completion
10 | 
11 | Compression policies in TimescaleDB allow for automatic compression of older data chunks, which significantly reduces storage requirements while maintaining efficient query performance.
12 | 
```

--------------------------------------------------------------------------------
/pkg/internal/logger/logger.go:
--------------------------------------------------------------------------------

```go
// Package logger exposes the project's internal logger to external
// consumers under pkg/, delegating every call to internal/logger so
// there is a single logging implementation.
package logger

import (
	"github.com/FreePeak/db-mcp-server/internal/logger"
)

// Debug logs a debug message using printf-style formatting.
func Debug(format string, v ...interface{}) {
	logger.Debug(format, v...)
}

// Info logs an info message using printf-style formatting.
func Info(format string, v ...interface{}) {
	logger.Info(format, v...)
}

// Warn logs a warning message using printf-style formatting.
func Warn(format string, v ...interface{}) {
	logger.Warn(format, v...)
}

// Error logs an error message using printf-style formatting.
func Error(format string, v ...interface{}) {
	logger.Error(format, v...)
}

// ErrorWithStack logs an error with a stack trace.
func ErrorWithStack(err error) {
	logger.ErrorWithStack(err)
}

// Initialize initializes the logger with the specified level string;
// accepted values are defined by internal/logger.
func Initialize(level string) {
	logger.Initialize(level)
}
36 | 
```

--------------------------------------------------------------------------------
/pkg/dbtools/tx.go:
--------------------------------------------------------------------------------

```go
 1 | package dbtools
 2 | 
 3 | import (
 4 | 	"database/sql"
 5 | 	"sync"
 6 | )
 7 | 
 8 | // Map to store active transactions
 9 | var transactions = make(map[string]*sql.Tx)
10 | var transactionMutex sync.RWMutex
11 | 
12 | // StoreTransaction stores a transaction in the global map
13 | func StoreTransaction(id string, tx *sql.Tx) {
14 | 	transactionMutex.Lock()
15 | 	defer transactionMutex.Unlock()
16 | 	transactions[id] = tx
17 | }
18 | 
19 | // GetTransaction retrieves a transaction from the global map
20 | func GetTransaction(id string) (*sql.Tx, bool) {
21 | 	transactionMutex.RLock()
22 | 	defer transactionMutex.RUnlock()
23 | 	tx, ok := transactions[id]
24 | 	return tx, ok
25 | }
26 | 
27 | // RemoveTransaction removes a transaction from the global map
28 | func RemoveTransaction(id string) {
29 | 	transactionMutex.Lock()
30 | 	defer transactionMutex.Unlock()
31 | 	delete(transactions, id)
32 | }
33 | 
```

--------------------------------------------------------------------------------
/start-mcp.sh:
--------------------------------------------------------------------------------

```bash
 1 | #!/bin/bash
 2 | 
 3 | # Set environment variables
 4 | export MCP_SERVER_NAME="multidb"
 5 | export CURSOR_EDITOR=1
 6 | 
 7 | # Get the absolute path of the script's directory
 8 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
 9 | cd "$SCRIPT_DIR"
10 | 
11 | # Database config
12 | CONFIG_FILE="config.json"
13 | 
14 | # Create logs directory if it doesn't exist
15 | mkdir -p logs
16 | 
17 | # Generate a timestamp for the log filename
18 | TIMESTAMP=$(date +"%Y%m%d-%H%M%S")
19 | LOG_FILE="logs/cursor-mcp-$TIMESTAMP.log"
20 | 
21 | # Display startup message
22 | echo "Starting DB MCP Server for Cursor..." >&2
23 | echo "Config file: $CONFIG_FILE" >&2
24 | echo "MCP Server Name: $MCP_SERVER_NAME" >&2
25 | echo "Logs will be written to: $LOG_FILE" >&2
26 | 
27 | # Run the server in cursor mode with stdio transport
28 | echo "Starting server..." >&2
29 | exec ./server \
30 |   -t stdio \
31 |   -c "$CONFIG_FILE" \
32 |   2> >(tee -a "$LOG_FILE" >&2) 
```

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------

```yaml
 1 | # These are supported funding model platforms
 2 | 
 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 4 | patreon: # Replace with a single Patreon username
 5 | open_collective: # Replace with a single Open Collective username
 6 | ko_fi: # Replace with a single Ko-fi username
 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12 | polar: # Replace with a single Polar username
13 | buy_me_a_coffee: linhdmn
14 | thanks_dev: # Replace with a single thanks.dev username
15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
16 | 
```

--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Changelog
 2 | 
 3 | ## [v1.6.1] - 2025-04-01
 4 | 
 5 | ### Added
 6 | - OpenAI Agents SDK compatibility by adding Items property to array parameters
 7 | - Test script for verifying OpenAI Agents SDK compatibility
 8 | 
 9 | ### Fixed
10 | - Issue #8: Array parameters in tool definitions now include required `items` property
11 | - JSON Schema validation errors in OpenAI Agents SDK integration
12 | 
 13 | ## [v1.6.0] - 2025-03-31
14 | 
15 | ### Changed
16 | - Upgraded cortex dependency from v1.0.3 to v1.0.4
17 | 
 18 | ## [] - 2025-03-31
19 | 
20 | ### Added
21 | - Internal logging system for improved debugging and monitoring
22 | - Logger implementation for all packages
23 | 
24 | ### Fixed
25 | - Connection issues with PostgreSQL databases
26 | - Restored functionality for all MCP tools
27 | - Eliminated non-JSON RPC logging in stdio mode
28 | 
 29 | ## [] - 2025-03-25
30 | 
31 | ### Added
32 | - Initial release of DB MCP Server
33 | - Multi-database connection support
34 | - Tool generation for database operations
35 | - README with guidelines on using tools in Cursor
36 | 
37 | 
```

--------------------------------------------------------------------------------
/internal/delivery/mcp/server_wrapper.go:
--------------------------------------------------------------------------------

```go
 1 | package mcp
 2 | 
import (
	"context"
	"fmt"

	"github.com/FreePeak/cortex/pkg/server"
	"github.com/FreePeak/cortex/pkg/types"

	"github.com/FreePeak/db-mcp-server/internal/logger"
)
11 | 
12 | // ServerWrapper provides a wrapper around server.MCPServer to handle type assertions
13 | type ServerWrapper struct {
14 | 	mcpServer *server.MCPServer
15 | }
16 | 
17 | // NewServerWrapper creates a new ServerWrapper
18 | func NewServerWrapper(mcpServer *server.MCPServer) *ServerWrapper {
19 | 	return &ServerWrapper{
20 | 		mcpServer: mcpServer,
21 | 	}
22 | }
23 | 
24 | // AddTool adds a tool to the server
25 | func (sw *ServerWrapper) AddTool(ctx context.Context, tool interface{}, handler func(ctx context.Context, request server.ToolCallRequest) (interface{}, error)) error {
26 | 	// Log the operation for debugging
27 | 	logger.Debug("Adding tool: %T", tool)
28 | 
29 | 	// Cast the tool to the expected type (*types.Tool)
30 | 	typedTool, ok := tool.(*types.Tool)
31 | 	if !ok {
32 | 		logger.Warn("Warning: Tool is not of type *types.Tool: %T", tool)
33 | 		return nil
34 | 	}
35 | 
36 | 	// Pass the tool to the MCPServer's AddTool method
37 | 	return sw.mcpServer.AddTool(ctx, typedTool, handler)
38 | }
39 | 
```

--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------

```yaml
 1 | name: Go Build & Test
 2 | 
 3 | on:
 4 |   push:
 5 |     branches: [ main ]
 6 |   pull_request:
 7 |     branches: [ main ]
 8 | 
 9 | jobs:
10 |   build:
11 |     name: Build & Test
12 |     runs-on: ubuntu-latest
13 |     steps:
14 |     - name: Set up Go
15 |       uses: actions/setup-go@v4
16 |       with:
17 |         go-version: '1.22'
18 |         check-latest: true
19 | 
20 |     - name: Check out code
21 |       uses: actions/checkout@v3
22 | 
23 |     - name: Update go.mod
24 |       run: |
25 |         go mod edit -go=1.22
26 |         go mod tidy
27 | 
28 |     - name: Get dependencies
29 |       run: go mod download
30 | 
31 |     - name: Build
32 |       run: go build -v ./...
33 | 
34 |     - name: Test
35 |       run: go test -v ./...
36 | 
37 |   lint:
38 |     name: Lint
39 |     runs-on: ubuntu-latest
40 |     steps:
41 |     - name: Set up Go
42 |       uses: actions/setup-go@v4
43 |       with:
44 |         go-version: '1.22'
45 |         check-latest: true
46 | 
47 |     - name: Check out code
48 |       uses: actions/checkout@v3
49 | 
50 |     - name: Update go.mod
51 |       run: |
52 |         go mod edit -go=1.22
53 |         go mod tidy
54 | 
55 |     - name: Install golangci-lint
56 |       run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.64.8
57 | 
58 |     - name: Run golangci-lint
59 |       run: golangci-lint run --timeout=5m 
```

--------------------------------------------------------------------------------
/docker-compose.timescaledb-test.yml:
--------------------------------------------------------------------------------

```yaml
 1 | version: "3.8"
 2 | 
 3 | services:
 4 |   timescaledb:
 5 |     image: timescale/timescaledb:latest-pg15
 6 |     environment:
 7 |       POSTGRES_USER: timescale_user
 8 |       POSTGRES_PASSWORD: timescale_password
 9 |       POSTGRES_DB: timescale_test
10 |     ports:
11 |       - "15435:5432"
12 |     volumes:
13 |       - ./init-scripts/timescaledb:/docker-entrypoint-initdb.d
14 |       - timescaledb_data:/var/lib/postgresql/data
15 |     healthcheck:
16 |       test: ["CMD-SHELL", "pg_isready -U timescale_user -d timescale_test"]
17 |       interval: 5s
18 |       timeout: 5s
19 |       retries: 10
20 |     networks:
21 |       - timescale-test-network
22 | 
23 |   db-mcp-server-test:
24 |     build: 
25 |       context: .
26 |       dockerfile: Dockerfile
27 |     environment:
28 |       - TZ=UTC
29 |     ports:
30 |       - "9093:9092"
31 |     volumes:
32 |       - ./config.timescaledb-test.json:/app/config.json
33 |       - ./wait-for-it.sh:/app/wait-for-it.sh
34 |     command:
35 |       [
36 |         "/bin/sh",
37 |         "-c",
38 |         "chmod +x /app/wait-for-it.sh && /app/wait-for-it.sh timescaledb 5432 30 && /app/server -t sse -c /app/config.json",
39 |       ]
40 |     depends_on:
41 |       timescaledb:
42 |         condition: service_healthy
43 |     networks:
44 |       - timescale-test-network
45 | 
46 | networks:
47 |   timescale-test-network:
48 |     driver: bridge
49 | 
50 | volumes:
51 |   timescaledb_data: 
```

--------------------------------------------------------------------------------
/pkg/dbtools/db_helpers.go:
--------------------------------------------------------------------------------

```go
// Package dbtools provides helpers for working with SQL databases.
package dbtools

import (
	"context"
	"database/sql"
)

// Database represents a database interface
// This is used in testing to provide a common interface
// so that real connections and mock implementations are interchangeable.
type Database interface {
	Query(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row
	Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
}

// Query executes a query and returns the result rows.
// The caller is responsible for closing the returned *sql.Rows.
func Query(ctx context.Context, db Database, query string, args ...interface{}) (*sql.Rows, error) {
	return db.Query(ctx, query, args...)
}

// QueryRow executes a query and returns a single row. Following
// database/sql semantics, errors are deferred until Scan is called.
func QueryRow(ctx context.Context, db Database, query string, args ...interface{}) *sql.Row {
	return db.QueryRow(ctx, query, args...)
}

// Exec executes a query that doesn't return rows (e.g. INSERT, UPDATE).
func Exec(ctx context.Context, db Database, query string, args ...interface{}) (sql.Result, error) {
	return db.Exec(ctx, query, args...)
}

// BeginTx starts a new transaction with the given options; per
// database/sql convention a nil opts uses the driver defaults.
func BeginTx(ctx context.Context, db Database, opts *sql.TxOptions) (*sql.Tx, error) {
	return db.BeginTx(ctx, opts)
}
36 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | FROM golang:1.24-alpine AS builder
 2 | 
 3 | # Install necessary build tools
 4 | RUN apk add --no-cache make gcc musl-dev
 5 | 
 6 | # Set the working directory
 7 | WORKDIR /app
 8 | 
 9 | # Copy go.mod and go.sum files to download dependencies
10 | COPY go.mod go.sum ./
11 | 
12 | # Download dependencies
13 | RUN go mod download
14 | 
15 | # Copy the entire project
16 | COPY . .
17 | 
18 | # Build the application
19 | RUN make build
20 | 
21 | # Create a smaller production image
22 | FROM alpine:latest
23 | 
24 | # Add necessary runtime packages and network diagnostic tools
25 | RUN apk add --no-cache ca-certificates tzdata bash netcat-openbsd bind-tools iputils busybox-extras
26 | 
27 | # Set the working directory
28 | WORKDIR /app
29 | 
30 | # Copy the built binary from the builder stage
31 | COPY --from=builder /app/bin/server /app/server
32 | 
33 | # Copy default config file
34 | COPY config.json /app/config.json
35 | 
36 | # Create data and logs directories
37 | RUN mkdir -p /app/data /app/logs
38 | 
39 | # Set environment variables
40 | ENV SERVER_PORT=9092
41 | ENV TRANSPORT_MODE=sse
42 | ENV CONFIG_PATH=/app/config.json
43 | 
44 | # Expose server port
45 | EXPOSE 9092
46 | 
47 | # Provide a volume for logs only
48 | VOLUME ["/app/logs"]
49 | 
50 | # Start the MCP server with proper configuration
51 | CMD ["/bin/bash", "-c", "/app/server -t ${TRANSPORT_MODE} -p ${SERVER_PORT} -c ${CONFIG_PATH}"]
52 | 
53 | # You can override the port by passing it as a command-line argument
54 | # docker run -p 8080:8080 db-mcp-server -port 8080 
```

--------------------------------------------------------------------------------
/wait-for-it.sh:
--------------------------------------------------------------------------------

```bash
 1 | #!/bin/sh
 2 | # wait-for-it.sh - Enhanced version for database connection checking
 3 | # Usage: wait-for-it.sh host port [timeout]
 4 | 
 5 | set -e
 6 | 
 7 | host="$1"
 8 | port="$2"
 9 | timeout="${3:-30}"
10 | 
11 | if [ -z "$host" ] || [ -z "$port" ]; then
12 |   echo "Error: Host and port are required arguments"
13 |   echo "Usage: wait-for-it.sh host port [timeout]"
14 |   exit 1
15 | fi
16 | 
17 | echo "Waiting for $host:$port to be available..."
18 | start_time=$(date +%s)
19 | end_time=$((start_time + timeout))
20 | 
21 | while true; do
22 |   # Try to establish a TCP connection to the specified host and port
23 |   if nc -z -w 1 "$host" "$port" 2>/dev/null; then
24 |     echo "$host:$port is available"
25 |     exit 0
26 |   fi
27 |   
28 |   current_time=$(date +%s)
29 |   remaining=$((end_time - current_time))
30 |   
31 |   if [ $current_time -gt $end_time ]; then
32 |     echo "ERROR: Timeout waiting for $host:$port to be available after $timeout seconds"
33 |     echo "Network diagnostics:"
34 |     echo "Current container IP: $(hostname -I || echo 'Unknown')"
35 |     echo "Attempting to ping $host:"
36 |     ping -c 1 -W 1 "$host" || echo "Ping failed"
37 |     echo "Attempting DNS lookup for $host:"
38 |     nslookup "$host" || echo "DNS lookup failed"
39 |     echo "Network interfaces:"
40 |     ifconfig || ip addr show || echo "Network tools not available"
41 |     exit 1
42 |   fi
43 |   
44 |   echo "Waiting for $host:$port to be available... (${remaining}s timeout remaining)"
45 |   sleep 1
46 | done 
```

--------------------------------------------------------------------------------
/test.Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | FROM debian:bullseye-slim
 2 | 
 3 | ENV DEBIAN_FRONTEND=noninteractive \
 4 |     GLAMA_VERSION="0.2.0" \
 5 |     PATH="/home/service-user/.local/bin:${PATH}"
 6 | 
 7 | RUN (groupadd -r service-user) && (useradd -u 1987 -r -m -g service-user service-user) && (mkdir -p /home/service-user/.local/bin /app) && (chown -R service-user:service-user /home/service-user /app) && (apt-get update) && (apt-get install -y --no-install-recommends build-essential curl wget software-properties-common libssl-dev zlib1g-dev git) && (rm -rf /var/lib/apt/lists/*) && (curl -fsSL https://deb.nodesource.com/setup_22.x | bash -) && (apt-get install -y nodejs) && (apt-get clean) && (npm install -g [email protected]) && (npm install -g [email protected]) && (npm install -g [email protected]) && (node --version) && (curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR="/usr/local/bin" sh) && (uv python install 3.13 --default --preview) && (ln -s $(uv python find) /usr/local/bin/python) && (python --version) && (apt-get clean) && (rm -rf /var/lib/apt/lists/*) && (rm -rf /tmp/*) && (rm -rf /var/tmp/*) && (su - service-user -c "uv python install 3.13 --default --preview && python --version")
 8 | 
 9 | USER service-user
10 | 
11 | WORKDIR /app
12 | 
13 | RUN git clone https://github.com/FreePeak/db-mcp-server . && git checkout 71ffa5b5f5bb197a4732e03889b47da7ef09d819
14 | 
15 | RUN (chmod +x /app/docker-wrapper.sh)
16 | 
17 | RUN ls -la
18 | 
19 | RUN cat /app/docker-wrapper.sh
20 | 
21 | CMD ["mcp-proxy", "/app/docker-wrapper.sh"]
```

--------------------------------------------------------------------------------
/internal/delivery/mcp/mock_test.go:
--------------------------------------------------------------------------------

```go
 1 | package mcp
 2 | 
 3 | import (
 4 | 	"context"
 5 | 
 6 | 	"github.com/stretchr/testify/mock"
 7 | )
 8 | 
 9 | // MockDatabaseUseCase is a mock implementation of the database use case
10 | type MockDatabaseUseCase struct {
11 | 	mock.Mock
12 | }
13 | 
14 | // ExecuteStatement mocks the ExecuteStatement method
15 | func (m *MockDatabaseUseCase) ExecuteStatement(ctx context.Context, dbID, statement string, params []interface{}) (string, error) {
16 | 	args := m.Called(ctx, dbID, statement, params)
17 | 	return args.String(0), args.Error(1)
18 | }
19 | 
20 | // GetDatabaseType mocks the GetDatabaseType method
21 | func (m *MockDatabaseUseCase) GetDatabaseType(dbID string) (string, error) {
22 | 	args := m.Called(dbID)
23 | 	return args.String(0), args.Error(1)
24 | }
25 | 
26 | // ExecuteQuery mocks the ExecuteQuery method
27 | func (m *MockDatabaseUseCase) ExecuteQuery(ctx context.Context, dbID, query string, params []interface{}) (string, error) {
28 | 	args := m.Called(ctx, dbID, query, params)
29 | 	return args.String(0), args.Error(1)
30 | }
31 | 
32 | // ExecuteTransaction mocks the ExecuteTransaction method
33 | func (m *MockDatabaseUseCase) ExecuteTransaction(ctx context.Context, dbID, action string, txID string, statement string, params []interface{}, readOnly bool) (string, map[string]interface{}, error) {
34 | 	args := m.Called(ctx, dbID, action, txID, statement, params, readOnly)
35 | 	return args.String(0), args.Get(1).(map[string]interface{}), args.Error(2)
36 | }
37 | 
38 | // GetDatabaseInfo mocks the GetDatabaseInfo method
39 | func (m *MockDatabaseUseCase) GetDatabaseInfo(dbID string) (map[string]interface{}, error) {
40 | 	args := m.Called(dbID)
41 | 	return args.Get(0).(map[string]interface{}), args.Error(1)
42 | }
43 | 
44 | // ListDatabases mocks the ListDatabases method
45 | func (m *MockDatabaseUseCase) ListDatabases() []string {
46 | 	args := m.Called()
47 | 	return args.Get(0).([]string)
48 | }
49 | 
```

--------------------------------------------------------------------------------
/config.timescaledb-test.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "log_level": "debug",
 3 |   "databases": [
 4 |     {
 5 |       "id": "timescaledb_test",
 6 |       "type": "postgres",
 7 |       "host": "timescaledb",
 8 |       "port": 5432,
 9 |       "user": "timescale_user",
10 |       "password": "timescale_password",
11 |       "database": "timescale_test",
12 |       "display_name": "TimescaleDB Test",
13 |       "description": "PostgreSQL with TimescaleDB extension for testing time-series functionality",
14 |       "pool_max_conn": 10,
15 |       "max_idle_conn": 5,
16 |       "conn_max_lifetime": 3600,
17 |       "enable_user_query": true,
18 |       "features": {
19 |         "extensions": [
20 |           "timescaledb"
21 |         ]
22 |       }
23 |     },
24 |     {
25 |       "id": "timescaledb_readonly",
26 |       "type": "postgres",
27 |       "host": "timescaledb",
28 |       "port": 5432,
29 |       "user": "test_readonly",
30 |       "password": "readonly_password",
31 |       "database": "timescale_test",
32 |       "display_name": "TimescaleDB Read-Only",
33 |       "description": "Read-only access to TimescaleDB test database",
34 |       "pool_max_conn": 5,
35 |       "max_idle_conn": 2,
36 |       "conn_max_lifetime": 3600,
37 |       "enable_user_query": true,
38 |       "features": {
39 |         "extensions": [
40 |           "timescaledb"
41 |         ]
42 |       }
43 |     },
44 |     {
45 |       "id": "timescaledb_readwrite",
46 |       "type": "postgres",
47 |       "host": "timescaledb",
48 |       "port": 5432,
49 |       "user": "test_readwrite",
50 |       "password": "readwrite_password",
51 |       "database": "timescale_test",
52 |       "display_name": "TimescaleDB Read-Write",
53 |       "description": "Read-write access to TimescaleDB test database",
54 |       "pool_max_conn": 5,
55 |       "max_idle_conn": 2,
56 |       "conn_max_lifetime": 3600,
57 |       "enable_user_query": true,
58 |       "features": {
59 |         "extensions": [
60 |           "timescaledb"
61 |         ]
62 |       }
63 |     }
64 |   ],
65 |   "server": {
66 |     "port": 9092,
67 |     "host": "0.0.0.0",
68 |     "cors": {
69 |       "allowed_origins": ["*"],
70 |       "allowed_methods": ["GET", "POST", "OPTIONS"],
71 |       "allowed_headers": ["Content-Type", "Authorization"]
72 |     }
73 |   }
74 | } 
```

--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |     "connections": [
 3 |         {
 4 |             "id": "mysql1",
 5 |             "type": "mysql",
 6 |             "host": "mysql1",
 7 |             "port": 3306,
 8 |             "name": "db1",
 9 |             "user": "user1",
10 |             "password": "password1",
11 |             "max_open_conns": 20,
12 |             "max_idle_conns": 5,
13 |             "conn_max_lifetime_seconds": 300,
14 |             "conn_max_idle_time_seconds": 60,
15 |             "query_timeout": 60
16 |         },
17 |         {
18 |             "id": "mysql2",
19 |             "type": "mysql",
20 |             "host": "mysql2",
21 |             "port": 3306,
22 |             "name": "db2",
23 |             "user": "user2",
24 |             "password": "password2",
25 |             "max_open_conns": 20,
26 |             "max_idle_conns": 5,
27 |             "conn_max_lifetime_seconds": 300,
28 |             "conn_max_idle_time_seconds": 60,
29 |             "query_timeout": 45
30 |         },
31 |         {
32 |             "id": "postgres1",
33 |             "type": "postgres",
34 |             "host": "postgres1",
35 |             "port": 5432,
36 |             "name": "db1",
37 |             "user": "user1",
38 |             "password": "password1",
39 |             "ssl_mode": "disable",
40 |             "application_name": "db-mcp-server",
41 |             "connect_timeout": 10,
42 |             "query_timeout": 60,
43 |             "max_open_conns": 20,
44 |             "max_idle_conns": 5,
45 |             "conn_max_lifetime_seconds": 300,
46 |             "conn_max_idle_time_seconds": 60
47 |         },
48 |         {
49 |             "id": "postgres2",
50 |             "type": "postgres",
51 |             "host": "postgres2",
52 |             "port": 5432,
53 |             "name": "db2",
54 |             "user": "user2",
55 |             "password": "password2",
56 |             "ssl_mode": "prefer",
57 |             "application_name": "db-mcp-server",
58 |             "connect_timeout": 15,
59 |             "query_timeout": 90,
60 |             "target_session_attrs": "any",
61 |             "options": {
62 |                 "application_name": "db-mcp-server",
63 |                 "client_encoding": "UTF8"
64 |             },
65 |             "max_open_conns": 25,
66 |             "max_idle_conns": 5,
67 |             "conn_max_lifetime_seconds": 300,
68 |             "conn_max_idle_time_seconds": 60
69 |         },
70 |         {
71 |             "id": "postgres3",
72 |             "type": "postgres",
73 |             "host": "postgres3",
74 |             "port": 5432,
75 |             "name": "screenerdb",
76 |             "user": "screener",
77 |             "password": "screenerpass",
78 |             "ssl_mode": "disable",
79 |             "application_name": "db-mcp-server",
80 |             "connect_timeout": 10,
81 |             "query_timeout": 120,
82 |             "max_open_conns": 20,
83 |             "max_idle_conns": 5,
84 |             "conn_max_lifetime_seconds": 300,
85 |             "conn_max_idle_time_seconds": 60
86 |         }
87 |     ]
88 | }
89 | 
```

--------------------------------------------------------------------------------
/pkg/jsonrpc/jsonrpc.go:
--------------------------------------------------------------------------------

```go
  1 | package jsonrpc
  2 | 
  3 | import (
  4 | 	"fmt"
  5 | )
  6 | 
  7 | // Version is the JSON-RPC version string
  8 | const Version = "2.0"
  9 | 
 10 | // Request represents a JSON-RPC request
 11 | type Request struct {
 12 | 	JSONRPC string      `json:"jsonrpc"`
 13 | 	ID      interface{} `json:"id,omitempty"`
 14 | 	Method  string      `json:"method"`
 15 | 	Params  interface{} `json:"params,omitempty"`
 16 | }
 17 | 
 18 | // IsNotification returns true if the request is a notification (has no ID)
 19 | func (r *Request) IsNotification() bool {
 20 | 	return r.ID == nil
 21 | }
 22 | 
 23 | // Response represents a JSON-RPC response
 24 | type Response struct {
 25 | 	JSONRPC string      `json:"jsonrpc"`
 26 | 	ID      interface{} `json:"id,omitempty"`
 27 | 	Result  interface{} `json:"result,omitempty"`
 28 | 	Error   *Error      `json:"error,omitempty"`
 29 | }
 30 | 
 31 | // Error represents a JSON-RPC error
 32 | type Error struct {
 33 | 	Code    int         `json:"code"`
 34 | 	Message string      `json:"message"`
 35 | 	Data    interface{} `json:"data,omitempty"`
 36 | }
 37 | 
 38 | // Standard error codes
 39 | const (
 40 | 	ParseErrorCode     = -32700
 41 | 	InvalidRequestCode = -32600
 42 | 	MethodNotFoundCode = -32601
 43 | 	InvalidParamsCode  = -32602
 44 | 	InternalErrorCode  = -32603
 45 | )
 46 | 
 47 | // Error returns a string representation of the error
 48 | func (e *Error) Error() string {
 49 | 	return fmt.Sprintf("JSON-RPC error %d: %s", e.Code, e.Message)
 50 | }
 51 | 
 52 | // NewResponse creates a new response for the given request
 53 | func NewResponse(req *Request, result interface{}, err *Error) *Response {
 54 | 	resp := &Response{
 55 | 		JSONRPC: Version,
 56 | 		ID:      req.ID,
 57 | 	}
 58 | 
 59 | 	if err != nil {
 60 | 		resp.Error = err
 61 | 	} else {
 62 | 		resp.Result = result
 63 | 	}
 64 | 
 65 | 	return resp
 66 | }
 67 | 
 68 | // NewError creates a new Error with the given code and message
 69 | func NewError(code int, message string, data interface{}) *Error {
 70 | 	return &Error{
 71 | 		Code:    code,
 72 | 		Message: message,
 73 | 		Data:    data,
 74 | 	}
 75 | }
 76 | 
 77 | // ParseError creates a Parse Error
 78 | func ParseError(data interface{}) *Error {
 79 | 	return &Error{
 80 | 		Code:    ParseErrorCode,
 81 | 		Message: "Parse error",
 82 | 		Data:    data,
 83 | 	}
 84 | }
 85 | 
 86 | // InvalidRequestError creates an Invalid Request error
 87 | func InvalidRequestError(data interface{}) *Error {
 88 | 	return &Error{
 89 | 		Code:    InvalidRequestCode,
 90 | 		Message: "Invalid request",
 91 | 		Data:    data,
 92 | 	}
 93 | }
 94 | 
 95 | // MethodNotFoundError creates a Method Not Found error
 96 | func MethodNotFoundError(method string) *Error {
 97 | 	return &Error{
 98 | 		Code:    MethodNotFoundCode,
 99 | 		Message: "Method not found",
100 | 		Data:    method,
101 | 	}
102 | }
103 | 
104 | // InvalidParamsError creates an Invalid Params error
105 | func InvalidParamsError(data interface{}) *Error {
106 | 	return &Error{
107 | 		Code:    InvalidParamsCode,
108 | 		Message: "Invalid params",
109 | 		Data:    data,
110 | 	}
111 | }
112 | 
113 | // InternalError creates an Internal Error
114 | func InternalError(data interface{}) *Error {
115 | 	return &Error{
116 | 		Code:    InternalErrorCode,
117 | 		Message: "Internal error",
118 | 		Data:    data,
119 | 	}
120 | }
121 | 
```

--------------------------------------------------------------------------------
/init-scripts/timescaledb/01-init.sql:
--------------------------------------------------------------------------------

```sql
 1 | -- Enable TimescaleDB extension
 2 | CREATE EXTENSION IF NOT EXISTS timescaledb;
 3 | 
 4 | -- Create sensor data schema
 5 | CREATE SCHEMA IF NOT EXISTS test_data;
 6 | 
 7 | -- Create sensor_readings table for hypertable tests
 8 | CREATE TABLE test_data.sensor_readings (
 9 |     time TIMESTAMPTZ NOT NULL,
10 |     sensor_id INTEGER NOT NULL,
11 |     temperature DOUBLE PRECISION,
12 |     humidity DOUBLE PRECISION,
13 |     pressure DOUBLE PRECISION,
14 |     battery_level DOUBLE PRECISION,
15 |     location VARCHAR(50)
16 | );
17 | 
18 | -- Convert to hypertable
19 | SELECT create_hypertable('test_data.sensor_readings', 'time');
20 | 
21 | -- Create weather data for continuous aggregate tests
22 | CREATE TABLE test_data.weather_observations (
23 |     time TIMESTAMPTZ NOT NULL,
24 |     station_id INTEGER NOT NULL,
25 |     temperature DOUBLE PRECISION,
26 |     precipitation DOUBLE PRECISION,
27 |     wind_speed DOUBLE PRECISION,
28 |     wind_direction DOUBLE PRECISION,
29 |     atmospheric_pressure DOUBLE PRECISION
30 | );
31 | 
32 | -- Convert to hypertable
33 | SELECT create_hypertable('test_data.weather_observations', 'time');
34 | 
35 | -- Create device metrics for compression tests
36 | CREATE TABLE test_data.device_metrics (
37 |     time TIMESTAMPTZ NOT NULL,
38 |     device_id INTEGER NOT NULL,
39 |     cpu_usage DOUBLE PRECISION,
40 |     memory_usage DOUBLE PRECISION,
41 |     network_in DOUBLE PRECISION,
42 |     network_out DOUBLE PRECISION,
43 |     disk_io DOUBLE PRECISION
44 | );
45 | 
46 | -- Convert to hypertable
47 | SELECT create_hypertable('test_data.device_metrics', 'time');
48 | 
49 | -- Create stock data for time-series analysis
50 | CREATE TABLE test_data.stock_prices (
51 |     time TIMESTAMPTZ NOT NULL,
52 |     symbol VARCHAR(10) NOT NULL,
53 |     open_price DOUBLE PRECISION,
54 |     high_price DOUBLE PRECISION,
55 |     low_price DOUBLE PRECISION,
56 |     close_price DOUBLE PRECISION,
57 |     volume INTEGER
58 | );
59 | 
60 | -- Convert to hypertable
61 | SELECT create_hypertable('test_data.stock_prices', 'time');
62 | 
63 | -- Create a regular table for comparison tests
64 | CREATE TABLE test_data.regular_table (
65 |     id SERIAL PRIMARY KEY,
66 |     name VARCHAR(100),
67 |     value DOUBLE PRECISION,
68 |     created_at TIMESTAMPTZ DEFAULT NOW()
69 | );
70 | 
71 | -- Create a table for testing space partitioning
72 | CREATE TABLE test_data.multi_partition_data (
73 |     time TIMESTAMPTZ NOT NULL,
74 |     device_id INTEGER NOT NULL,
75 |     region VARCHAR(50),
76 |     metric_value DOUBLE PRECISION
77 | );
78 | 
79 | -- Convert to hypertable with space partitioning
80 | SELECT create_hypertable(
81 |     'test_data.multi_partition_data', 
82 |     'time', 
83 |     'device_id', 
84 |     number_partitions => 4
85 | );
86 | 
87 | -- Create test users
88 | CREATE USER test_readonly WITH PASSWORD 'readonly_password';
89 | CREATE USER test_readwrite WITH PASSWORD 'readwrite_password';
90 | 
91 | -- Grant permissions
92 | GRANT USAGE ON SCHEMA test_data TO test_readonly, test_readwrite;
93 | GRANT SELECT ON ALL TABLES IN SCHEMA test_data TO test_readonly, test_readwrite;
94 | GRANT INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA test_data TO test_readwrite;
95 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA test_data TO test_readwrite; 
```

--------------------------------------------------------------------------------
/internal/domain/database.go:
--------------------------------------------------------------------------------

```go
  1 | package domain
  2 | 
  3 | import (
  4 | 	"context"
  5 | )
  6 | 
  7 | // Database represents a database connection and operations
  8 | type Database interface {
  9 | 	Query(ctx context.Context, query string, args ...interface{}) (Rows, error)
 10 | 	Exec(ctx context.Context, statement string, args ...interface{}) (Result, error)
 11 | 	Begin(ctx context.Context, opts *TxOptions) (Tx, error)
 12 | }
 13 | 
 14 | // Rows represents database query results
 15 | type Rows interface {
 16 | 	Close() error
 17 | 	Columns() ([]string, error)
 18 | 	Next() bool
 19 | 	Scan(dest ...interface{}) error
 20 | 	Err() error
 21 | }
 22 | 
 23 | // Result represents the result of a database operation
 24 | type Result interface {
 25 | 	RowsAffected() (int64, error)
 26 | 	LastInsertId() (int64, error)
 27 | }
 28 | 
 29 | // Tx represents a database transaction
 30 | type Tx interface {
 31 | 	Commit() error
 32 | 	Rollback() error
 33 | 	Query(ctx context.Context, query string, args ...interface{}) (Rows, error)
 34 | 	Exec(ctx context.Context, statement string, args ...interface{}) (Result, error)
 35 | }
 36 | 
 37 | // TxOptions represents options for starting a transaction
 38 | type TxOptions struct {
 39 | 	ReadOnly bool
 40 | }
 41 | 
 42 | // PerformanceAnalyzer for analyzing database query performance
 43 | type PerformanceAnalyzer interface {
 44 | 	GetSlowQueries(limit int) ([]SlowQuery, error)
 45 | 	GetMetrics() (PerformanceMetrics, error)
 46 | 	AnalyzeQuery(query string) (QueryAnalysis, error)
 47 | 	Reset() error
 48 | 	SetThreshold(threshold int) error
 49 | }
 50 | 
 51 | // SlowQuery represents a slow query that has been recorded
 52 | type SlowQuery struct {
 53 | 	Query     string
 54 | 	Duration  float64
 55 | 	Timestamp string
 56 | }
 57 | 
 58 | // PerformanceMetrics represents database performance metrics
 59 | type PerformanceMetrics struct {
 60 | 	TotalQueries  int
 61 | 	AvgDuration   float64
 62 | 	MaxDuration   float64
 63 | 	SlowQueries   int
 64 | 	Threshold     int
 65 | 	LastResetTime string
 66 | }
 67 | 
 68 | // QueryAnalysis represents the analysis of a SQL query
 69 | type QueryAnalysis struct {
 70 | 	Query       string
 71 | 	ExplainPlan string
 72 | }
 73 | 
 74 | // SchemaInfo represents database schema information
 75 | type SchemaInfo interface {
 76 | 	GetTables() ([]string, error)
 77 | 	GetColumns(table string) ([]ColumnInfo, error)
 78 | 	GetIndexes(table string) ([]IndexInfo, error)
 79 | 	GetConstraints(table string) ([]ConstraintInfo, error)
 80 | }
 81 | 
 82 | // ColumnInfo represents information about a database column
 83 | type ColumnInfo struct {
 84 | 	Name     string
 85 | 	Type     string
 86 | 	Nullable bool
 87 | 	Default  string
 88 | }
 89 | 
 90 | // IndexInfo represents information about a database index
 91 | type IndexInfo struct {
 92 | 	Name    string
 93 | 	Table   string
 94 | 	Columns []string
 95 | 	Unique  bool
 96 | 	Primary bool
 97 | }
 98 | 
 99 | // ConstraintInfo represents information about a database constraint
100 | type ConstraintInfo struct {
101 | 	Name              string
102 | 	Type              string
103 | 	Table             string
104 | 	Columns           []string
105 | 	ReferencedTable   string
106 | 	ReferencedColumns []string
107 | }
108 | 
109 | // DatabaseRepository defines methods for managing database connections
110 | type DatabaseRepository interface {
111 | 	GetDatabase(id string) (Database, error)
112 | 	ListDatabases() []string
113 | 	GetDatabaseType(id string) (string, error)
114 | }
115 | 
```

--------------------------------------------------------------------------------
/pkg/db/timescale/config.go:
--------------------------------------------------------------------------------

```go
  1 | package timescale
  2 | 
  3 | import (
  4 | 	"github.com/FreePeak/db-mcp-server/pkg/db"
  5 | )
  6 | 
  7 | // DBConfig extends PostgreSQL configuration with TimescaleDB-specific options
  8 | type DBConfig struct {
  9 | 	// Inherit PostgreSQL config
 10 | 	PostgresConfig db.Config
 11 | 
 12 | 	// TimescaleDB-specific settings
 13 | 	ChunkTimeInterval string             // Default chunk time interval (e.g., "7 days")
 14 | 	RetentionPolicy   *RetentionPolicy   // Data retention configuration
 15 | 	CompressionPolicy *CompressionPolicy // Compression configuration
 16 | 	UseTimescaleDB    bool               // Enable TimescaleDB features (default: true)
 17 | }
 18 | 
 19 | // RetentionPolicy defines how long to keep data in TimescaleDB
 20 | type RetentionPolicy struct {
 21 | 	Enabled    bool
 22 | 	Duration   string // e.g., "90 days"
 23 | 	DropChunks bool   // Whether to physically drop chunks (vs logical deletion)
 24 | }
 25 | 
 26 | // CompressionPolicy defines how and when to compress data
 27 | type CompressionPolicy struct {
 28 | 	Enabled       bool
 29 | 	After         string // e.g., "7 days"
 30 | 	OrderBy       string // Column to order by during compression
 31 | 	SegmentBy     string // Column to segment by during compression
 32 | 	CompressChunk bool   // Whether to manually compress chunks
 33 | }
 34 | 
 35 | // NewDefaultTimescaleDBConfig creates a DBConfig with default values
 36 | func NewDefaultTimescaleDBConfig(pgConfig db.Config) DBConfig {
 37 | 	return DBConfig{
 38 | 		PostgresConfig:    pgConfig,
 39 | 		ChunkTimeInterval: "7 days",
 40 | 		UseTimescaleDB:    true,
 41 | 		RetentionPolicy: &RetentionPolicy{
 42 | 			Enabled:    false,
 43 | 			Duration:   "90 days",
 44 | 			DropChunks: true,
 45 | 		},
 46 | 		CompressionPolicy: &CompressionPolicy{
 47 | 			Enabled:       false,
 48 | 			After:         "30 days",
 49 | 			CompressChunk: true,
 50 | 		},
 51 | 	}
 52 | }
 53 | 
 54 | // IsTimescaleDB returns true if the config is for TimescaleDB
 55 | func IsTimescaleDB(config db.Config) bool {
 56 | 	// TimescaleDB is a PostgreSQL extension, so the driver must be postgres
 57 | 	if config.Type != "postgres" {
 58 | 		return false
 59 | 	}
 60 | 
 61 | 	// Check if TimescaleDB extension is explicitly enabled in options
 62 | 	if config.Options != nil {
 63 | 		if val, ok := config.Options["use_timescaledb"]; ok {
 64 | 			return val == "true" || val == "1"
 65 | 		}
 66 | 	}
 67 | 
 68 | 	// Default to true for PostgreSQL connections
 69 | 	return true
 70 | }
 71 | 
 72 | // FromDBConfig converts a standard db.Config to a DBConfig
 73 | func FromDBConfig(config db.Config) DBConfig {
 74 | 	tsdbConfig := NewDefaultTimescaleDBConfig(config)
 75 | 
 76 | 	// Override with custom settings from options if present
 77 | 	if config.Options != nil {
 78 | 		if val, ok := config.Options["chunk_time_interval"]; ok {
 79 | 			tsdbConfig.ChunkTimeInterval = val
 80 | 		}
 81 | 
 82 | 		if val, ok := config.Options["retention_duration"]; ok {
 83 | 			tsdbConfig.RetentionPolicy.Duration = val
 84 | 			tsdbConfig.RetentionPolicy.Enabled = true
 85 | 		}
 86 | 
 87 | 		if val, ok := config.Options["compression_after"]; ok {
 88 | 			tsdbConfig.CompressionPolicy.After = val
 89 | 			tsdbConfig.CompressionPolicy.Enabled = true
 90 | 		}
 91 | 
 92 | 		if val, ok := config.Options["segment_by"]; ok {
 93 | 			tsdbConfig.CompressionPolicy.SegmentBy = val
 94 | 		}
 95 | 
 96 | 		if val, ok := config.Options["order_by"]; ok {
 97 | 			tsdbConfig.CompressionPolicy.OrderBy = val
 98 | 		}
 99 | 	}
100 | 
101 | 	return tsdbConfig
102 | }
103 | 
```

--------------------------------------------------------------------------------
/internal/delivery/mcp/response.go:
--------------------------------------------------------------------------------

```go
  1 | package mcp
  2 | 
  3 | import (
  4 | 	"fmt"
  5 | )
  6 | 
  7 | // TextContent represents a text content item in a response
  8 | type TextContent struct {
  9 | 	Type string `json:"type"`
 10 | 	Text string `json:"text"`
 11 | }
 12 | 
 13 | // Response is a standardized response format for MCP tools
 14 | type Response struct {
 15 | 	Content  []TextContent          `json:"content"`
 16 | 	Metadata map[string]interface{} `json:"metadata,omitempty"`
 17 | }
 18 | 
 19 | // NewResponse creates a new empty Response
 20 | func NewResponse() *Response {
 21 | 	return &Response{
 22 | 		Content: make([]TextContent, 0),
 23 | 	}
 24 | }
 25 | 
 26 | // WithText adds a text content item to the response
 27 | func (r *Response) WithText(text string) *Response {
 28 | 	r.Content = append(r.Content, TextContent{
 29 | 		Type: "text",
 30 | 		Text: text,
 31 | 	})
 32 | 	return r
 33 | }
 34 | 
 35 | // WithMetadata adds metadata to the response
 36 | func (r *Response) WithMetadata(key string, value interface{}) *Response {
 37 | 	if r.Metadata == nil {
 38 | 		r.Metadata = make(map[string]interface{})
 39 | 	}
 40 | 	r.Metadata[key] = value
 41 | 	return r
 42 | }
 43 | 
 44 | // FromString creates a response from a string
 45 | func FromString(text string) *Response {
 46 | 	return NewResponse().WithText(text)
 47 | }
 48 | 
 49 | // FromError creates an error response
 50 | func FromError(err error) (interface{}, error) {
 51 | 	return nil, err
 52 | }
 53 | 
 54 | // FormatResponse converts any response type to a properly formatted MCP response
 55 | func FormatResponse(response interface{}, err error) (interface{}, error) {
 56 | 	if err != nil {
 57 | 		// Already formatted as JSON-RPC error
 58 | 		return response, err
 59 | 	}
 60 | 
 61 | 	// For nil responses, return empty object to avoid null result
 62 | 	if response == nil {
 63 | 		return NewResponse(), nil
 64 | 	}
 65 | 
 66 | 	// If response is already an Response, return it
 67 | 	if mcpResp, ok := response.(*Response); ok {
 68 | 		// If content is empty, return a new empty response to ensure consistency
 69 | 		if len(mcpResp.Content) == 0 {
 70 | 			return NewResponse(), nil
 71 | 		}
 72 | 		return mcpResp, nil
 73 | 	}
 74 | 
 75 | 	// Handle string responses, checking for empty strings
 76 | 	if strResponse, ok := response.(string); ok {
 77 | 		if strResponse == "" || strResponse == "[]" {
 78 | 			return NewResponse(), nil
 79 | 		}
 80 | 		return FromString(strResponse), nil
 81 | 	}
 82 | 
 83 | 	// If response is already properly formatted with content as an array
 84 | 	if respMap, ok := response.(map[string]interface{}); ok {
 85 | 		// If the map is empty, return a new empty response
 86 | 		if len(respMap) == 0 {
 87 | 			return NewResponse(), nil
 88 | 		}
 89 | 
 90 | 		if content, exists := respMap["content"]; exists {
 91 | 			if contentSlice, isSlice := content.([]interface{}); isSlice {
 92 | 				// If content is an empty slice, return a new empty response
 93 | 				if len(contentSlice) == 0 {
 94 | 					return NewResponse(), nil
 95 | 				}
 96 | 				return respMap, nil
 97 | 			}
 98 | 		}
 99 | 
100 | 		// If it has a metadata field but not a properly formatted content field
101 | 		if _, hasContent := respMap["content"]; !hasContent {
102 | 			newResp := NewResponse().WithText(fmt.Sprintf("%v", respMap))
103 | 
104 | 			// Copy over metadata if it exists
105 | 			if metadata, hasMetadata := respMap["metadata"]; hasMetadata {
106 | 				if metadataMap, ok := metadata.(map[string]interface{}); ok {
107 | 					for k, v := range metadataMap {
108 | 						newResp.WithMetadata(k, v)
109 | 					}
110 | 				}
111 | 			}
112 | 
113 | 			return newResp, nil
114 | 		}
115 | 	}
116 | 
117 | 	// For any other type, convert to string and wrap in proper content format
118 | 	return FromString(fmt.Sprintf("%v", response)), nil
119 | }
120 | 
```

--------------------------------------------------------------------------------
/pkg/logger/logger.go:
--------------------------------------------------------------------------------

```go
  1 | package logger
  2 | 
  3 | import (
  4 | 	"fmt"
  5 | 	"os"
  6 | 	"path/filepath"
  7 | 	"strings"
  8 | 	"time"
  9 | 
 10 | 	intLogger "github.com/FreePeak/db-mcp-server/internal/logger"
 11 | )
 12 | 
 13 | var (
 14 | 	initialized bool = false
 15 | 	level       string
 16 | 	logFile     *os.File
 17 | )
 18 | 
 19 | // Initialize sets up the logger with the specified level
 20 | func Initialize(logLevel string) {
 21 | 	level = logLevel
 22 | 
 23 | 	// If in stdio mode, redirect logs to a file
 24 | 	if os.Getenv("TRANSPORT_MODE") == "stdio" {
 25 | 		// Create logs directory if it doesn't exist
 26 | 		logsDir := "logs"
 27 | 		if _, err := os.Stat(logsDir); os.IsNotExist(err) {
 28 | 			if err := os.Mkdir(logsDir, 0755); err != nil {
 29 | 				fmt.Fprintf(os.Stderr, "Failed to create logs directory: %v\n", err)
 30 | 			}
 31 | 		}
 32 | 
 33 | 		// Create log file with timestamp
 34 | 		timestamp := time.Now().Format("20060102-150405")
 35 | 		logFilePath := filepath.Join(logsDir, fmt.Sprintf("pkg-logger-%s.log", timestamp))
 36 | 
 37 | 		// Try to open the log file
 38 | 		var err error
 39 | 		logFile, err = os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
 40 | 		if err != nil {
 41 | 			// Last message to stderr before giving up
 42 | 			fmt.Fprintf(os.Stderr, "Failed to create pkg logger file: %v\n", err)
 43 | 		}
 44 | 	}
 45 | 
 46 | 	initialized = true
 47 | }
 48 | 
 49 | // ensureInitialized makes sure the logger is initialized
 50 | func ensureInitialized() {
 51 | 	if !initialized {
 52 | 		// Default to info level
 53 | 		Initialize("info")
 54 | 	}
 55 | }
 56 | 
 57 | // Debug logs a debug message
 58 | func Debug(format string, v ...interface{}) {
 59 | 	ensureInitialized()
 60 | 	if !shouldLog("debug") {
 61 | 		return
 62 | 	}
 63 | 	logMessage("DEBUG", format, v...)
 64 | }
 65 | 
 66 | // Info logs an info message
 67 | func Info(format string, v ...interface{}) {
 68 | 	ensureInitialized()
 69 | 	if !shouldLog("info") {
 70 | 		return
 71 | 	}
 72 | 	logMessage("INFO", format, v...)
 73 | }
 74 | 
 75 | // Warn logs a warning message
 76 | func Warn(format string, v ...interface{}) {
 77 | 	ensureInitialized()
 78 | 	if !shouldLog("warn") {
 79 | 		return
 80 | 	}
 81 | 	logMessage("WARN", format, v...)
 82 | }
 83 | 
 84 | // Error logs an error message
 85 | func Error(format string, v ...interface{}) {
 86 | 	ensureInitialized()
 87 | 	if !shouldLog("error") {
 88 | 		return
 89 | 	}
 90 | 	logMessage("ERROR", format, v...)
 91 | }
 92 | 
 93 | // shouldLog determines if we should log a message based on the level
 94 | func shouldLog(msgLevel string) bool {
 95 | 	// Always try to use the internal logger first as it's more sophisticated
 96 | 	// and handles stdio mode properly
 97 | 	levels := map[string]int{
 98 | 		"debug": 0,
 99 | 		"info":  1,
100 | 		"warn":  2,
101 | 		"error": 3,
102 | 	}
103 | 
104 | 	currentLevel := levels[strings.ToLower(level)]
105 | 	messageLevel := levels[strings.ToLower(msgLevel)]
106 | 
107 | 	return messageLevel >= currentLevel
108 | }
109 | 
// logMessage sends a log message to the appropriate destination: in stdio
// transport mode it writes only to the log file (stdout must stay clean for
// the protocol stream), otherwise it forwards to the internal logger.
func logMessage(level string, format string, v ...interface{}) {
	// Forward to the internal logger if possible
	message := fmt.Sprintf(format, v...)

	// If we're in stdio mode, avoid stdout completely
	if os.Getenv("TRANSPORT_MODE") == "stdio" {
		if logFile != nil {
			// Format the message with timestamp
			timestamp := time.Now().Format("2006-01-02 15:04:05")
			formattedMsg := fmt.Sprintf("[%s] %s: %s\n", timestamp, level, message)

			// Write to log file directly
			if _, err := logFile.WriteString(formattedMsg); err != nil {
				// We can't use stdout since we're in stdio mode, so we have to suppress this error
				// or write to stderr as a last resort
				fmt.Fprintf(os.Stderr, "Failed to write to log file: %v\n", err)
			}
		}
		// NOTE(review): when logFile is nil in stdio mode the message is
		// silently dropped — presumably intentional to protect the protocol
		// stream; confirm this is the desired behavior.
		return
	}

	// For non-stdio mode or if file writing failed
	switch strings.ToUpper(level) {
	case "DEBUG":
		intLogger.Debug(message)
	case "INFO":
		intLogger.Info(message)
	case "WARN":
		intLogger.Warn(message)
	case "ERROR":
		intLogger.Error(message)
	}
}
144 | 
```

--------------------------------------------------------------------------------
/pkg/db/db_test.go:
--------------------------------------------------------------------------------

```go
  1 | package db
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"database/sql"
  6 | 	"testing"
  7 | 	"time"
  8 | 
  9 | 	"github.com/stretchr/testify/assert"
 10 | )
 11 | 
 12 | func TestNewDatabase(t *testing.T) {
 13 | 	tests := []struct {
 14 | 		name      string
 15 | 		config    Config
 16 | 		expectErr bool
 17 | 	}{
 18 | 		{
 19 | 			name: "valid mysql config",
 20 | 			config: Config{
 21 | 				Type:     "mysql",
 22 | 				Host:     "localhost",
 23 | 				Port:     3306,
 24 | 				User:     "user",
 25 | 				Password: "password",
 26 | 				Name:     "testdb",
 27 | 			},
 28 | 			expectErr: false, // In real test this would be true unless DB exists
 29 | 		},
 30 | 		{
 31 | 			name: "valid postgres config",
 32 | 			config: Config{
 33 | 				Type:     "postgres",
 34 | 				Host:     "localhost",
 35 | 				Port:     5432,
 36 | 				User:     "user",
 37 | 				Password: "password",
 38 | 				Name:     "testdb",
 39 | 			},
 40 | 			expectErr: false, // In real test this would be true unless DB exists
 41 | 		},
 42 | 		{
 43 | 			name: "invalid driver",
 44 | 			config: Config{
 45 | 				Type: "invalid",
 46 | 			},
 47 | 			expectErr: true,
 48 | 		},
 49 | 		{
 50 | 			name:      "empty config",
 51 | 			config:    Config{},
 52 | 			expectErr: true,
 53 | 		},
 54 | 	}
 55 | 
 56 | 	for _, tt := range tests {
 57 | 		t.Run(tt.name, func(t *testing.T) {
 58 | 			// We're not actually connecting to a database in unit tests
 59 | 			// This is a mock test that just verifies the code path
 60 | 			_, err := NewDatabase(tt.config)
 61 | 
 62 | 			if tt.expectErr {
 63 | 				assert.Error(t, err)
 64 | 			} else {
 65 | 				// In a real test, we'd assert.NoError, but since we don't have actual
 66 | 				// databases to connect to, we'll skip this check
 67 | 				// assert.NoError(t, err)
 68 | 				t.Skip("Skipping actual DB connection in unit test")
 69 | 			}
 70 | 		})
 71 | 	}
 72 | }
 73 | 
 74 | func TestConfigSetDefaults(t *testing.T) {
 75 | 	config := Config{}
 76 | 	config.SetDefaults()
 77 | 
 78 | 	assert.Equal(t, 25, config.MaxOpenConns)
 79 | 	assert.Equal(t, 5, config.MaxIdleConns)
 80 | 	assert.Equal(t, 5*time.Minute, config.ConnMaxLifetime)
 81 | }
 82 | 
 83 | // MockDatabase implements Database interface for testing
 84 | type MockDatabase struct {
 85 | 	dbInstance    *sql.DB
 86 | 	driverNameVal string
 87 | 	dsnVal        string
 88 | 	LastQuery     string
 89 | 	LastArgs      []interface{}
 90 | 	ReturnRows    *sql.Rows
 91 | 	ReturnRow     *sql.Row
 92 | 	ReturnErr     error
 93 | 	ReturnTx      *sql.Tx
 94 | 	ReturnResult  sql.Result
 95 | }
 96 | 
 97 | func NewMockDatabase() *MockDatabase {
 98 | 	return &MockDatabase{
 99 | 		driverNameVal: "mock",
100 | 		dsnVal:        "mock://localhost/testdb",
101 | 	}
102 | }
103 | 
104 | func (m *MockDatabase) Query(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
105 | 	m.LastQuery = query
106 | 	m.LastArgs = args
107 | 	return m.ReturnRows, m.ReturnErr
108 | }
109 | 
110 | func (m *MockDatabase) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
111 | 	m.LastQuery = query
112 | 	m.LastArgs = args
113 | 	return m.ReturnRow
114 | }
115 | 
116 | func (m *MockDatabase) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
117 | 	m.LastQuery = query
118 | 	m.LastArgs = args
119 | 	return m.ReturnResult, m.ReturnErr
120 | }
121 | 
122 | func (m *MockDatabase) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
123 | 	return m.ReturnTx, m.ReturnErr
124 | }
125 | 
126 | func (m *MockDatabase) Connect() error {
127 | 	return m.ReturnErr
128 | }
129 | 
130 | func (m *MockDatabase) Close() error {
131 | 	return m.ReturnErr
132 | }
133 | 
134 | func (m *MockDatabase) Ping(ctx context.Context) error {
135 | 	return m.ReturnErr
136 | }
137 | 
138 | func (m *MockDatabase) DriverName() string {
139 | 	return m.driverNameVal
140 | }
141 | 
142 | func (m *MockDatabase) ConnectionString() string {
143 | 	return m.dsnVal
144 | }
145 | 
146 | func (m *MockDatabase) DB() *sql.DB {
147 | 	return m.dbInstance
148 | }
149 | 
150 | // Example of a test that uses the mock database
151 | func TestUsingMockDatabase(t *testing.T) {
152 | 	mockDB := NewMockDatabase()
153 | 
154 | 	// This test demonstrates how to use the mock database
155 | 	assert.Equal(t, "mock", mockDB.DriverName())
156 | 	assert.Equal(t, "mock://localhost/testdb", mockDB.ConnectionString())
157 | }
158 | 
```

--------------------------------------------------------------------------------
/timescaledb-test.sh:
--------------------------------------------------------------------------------

```bash
  1 | #!/bin/bash
  2 | 
  3 | # Script to manage the TimescaleDB test environment
  4 | 
  5 | set -e
  6 | 
  7 | GREEN='\033[0;32m'
  8 | YELLOW='\033[1;33m'
  9 | RED='\033[0;31m'
 10 | NC='\033[0m' # No Color
 11 | 
 12 | function usage {
 13 |     echo "Usage: $0 [command]"
 14 |     echo "Commands:"
 15 |     echo "  start       - Start the TimescaleDB test environment"
 16 |     echo "  stop        - Stop the TimescaleDB test environment"
 17 |     echo "  status      - Check the status of the TimescaleDB test environment"
 18 |     echo "  logs        - View the logs of the TimescaleDB test environment"
 19 |     echo "  restart     - Restart the TimescaleDB test environment"
 20 |     echo "  cleanup     - Stop and remove the containers, networks, and volumes"
 21 |     echo "  help        - Show this help message"
 22 | }
 23 | 
# start brings the compose environment up detached, polls pg_isready until
# TimescaleDB accepts connections (up to max_attempts tries, 2s apart), then
# prints connection details for the test databases.
function start {
    echo -e "${YELLOW}Starting TimescaleDB test environment...${NC}"
    docker-compose -f docker-compose.timescaledb-test.yml up -d
    
    echo -e "${YELLOW}Waiting for TimescaleDB to be ready...${NC}"
    # Give up after max_attempts readiness probes.
    max_attempts=30
    attempt=0
    
    while ! docker-compose -f docker-compose.timescaledb-test.yml exec timescaledb pg_isready -U timescale_user -d timescale_test > /dev/null 2>&1; do
        attempt=$((attempt+1))
        if [ $attempt -ge $max_attempts ]; then
            echo -e "${RED}Failed to connect to TimescaleDB after $max_attempts attempts.${NC}"
            exit 1
        fi
        echo -e "${YELLOW}Waiting for TimescaleDB to be ready (attempt $attempt/$max_attempts)...${NC}"
        sleep 2
    done
    
    echo -e "${GREEN}TimescaleDB test environment is running!${NC}"
    echo -e "${YELLOW}Connection information:${NC}"
    echo "  Host: localhost"
    echo "  Port: 15435"
    echo "  User: timescale_user"
    echo "  Password: timescale_password"
    echo "  Database: timescale_test"
    echo ""
    echo -e "${YELLOW}MCP Server:${NC}"
    echo "  URL: http://localhost:9093"
    echo ""
    echo -e "${YELLOW}You can access the TimescaleDB test environment using:${NC}"
    echo "  psql postgresql://timescale_user:timescale_password@localhost:15435/timescale_test"
    echo ""
    echo -e "${YELLOW}Available databases via MCP Server:${NC}"
    echo "  - timescaledb_test (admin access)"
    echo "  - timescaledb_readonly (read-only access)"
    echo "  - timescaledb_readwrite (read-write access)"
}
 61 | 
 62 | function stop {
 63 |     echo -e "${YELLOW}Stopping TimescaleDB test environment...${NC}"
 64 |     docker-compose -f docker-compose.timescaledb-test.yml stop
 65 |     echo -e "${GREEN}TimescaleDB test environment stopped.${NC}"
 66 | }
 67 | 
 68 | function status {
 69 |     echo -e "${YELLOW}Status of TimescaleDB test environment:${NC}"
 70 |     docker-compose -f docker-compose.timescaledb-test.yml ps
 71 | }
 72 | 
 73 | function logs {
 74 |     echo -e "${YELLOW}Logs of TimescaleDB test environment:${NC}"
 75 |     docker-compose -f docker-compose.timescaledb-test.yml logs "$@"
 76 | }
 77 | 
 78 | function restart {
 79 |     echo -e "${YELLOW}Restarting TimescaleDB test environment...${NC}"
 80 |     docker-compose -f docker-compose.timescaledb-test.yml restart
 81 |     echo -e "${GREEN}TimescaleDB test environment restarted.${NC}"
 82 | }
 83 | 
 84 | function cleanup {
 85 |     echo -e "${YELLOW}Cleaning up TimescaleDB test environment...${NC}"
 86 |     docker-compose -f docker-compose.timescaledb-test.yml down -v
 87 |     echo -e "${GREEN}TimescaleDB test environment cleaned up.${NC}"
 88 | }
 89 | 
# Main script
# Dispatch on the first argument; unknown or missing commands print usage
# and exit non-zero.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status
        ;;
    logs)
        # Drop the "logs" word so remaining flags pass through to docker-compose.
        shift
        logs "$@"
        ;;
    restart)
        restart
        ;;
    cleanup)
        cleanup
        ;;
    help)
        usage
        ;;
    *)
        usage
        exit 1
        ;;
esac

exit 0 
```

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------

```yaml
version: "3.8"

services:
  # MCP server fronting the databases below. The startup command waits (via
  # wait-for-it.sh) for mysql1, mysql2, postgres1 and postgres3 before
  # launching the server in SSE mode with the mounted config.json.
  # NOTE(review): postgres2 is declared below but is neither waited on nor
  # listed in depends_on — confirm whether config.json references it.
  db-mcp-server:
    image: freepeak/db-mcp-server:latest
    platform: ${DOCKER_PLATFORM:-linux/amd64}
    ports:
      - "9092:9092"
    volumes:
      - ./config.json:/app/config.json
      - ./wait-for-it.sh:/app/wait-for-it.sh
    command:
      [
        "/bin/sh",
        "-c",
        "chmod +x /app/wait-for-it.sh && /app/wait-for-it.sh mysql1 3306 30 && /app/wait-for-it.sh mysql2 3306 30 && /app/wait-for-it.sh postgres1 5432 30 && /app/wait-for-it.sh postgres3 5432 30 && /app/server -t sse -c /app/config.json",
      ]
    depends_on:
      mysql1:
        condition: service_healthy
      mysql2:
        condition: service_healthy
      postgres1:
        condition: service_healthy
      postgres3:
        condition: service_healthy
    networks:
      - db-network

  # MySQL backend "db1", exposed on host port 13306.
  mysql1:
    image: mysql:8.0
    environment:
      MYSQL_ROOT_PASSWORD: password
      MYSQL_DATABASE: db1
      MYSQL_USER: user1
      MYSQL_PASSWORD: password1
      MYSQL_AUTHENTICATION_PLUGIN: mysql_native_password
    ports:
      - "13306:3306"
    volumes:
      - mysql1_data:/var/lib/mysql
    command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-h",
          "localhost",
          "-u",
          "root",
          "-ppassword",
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # MySQL backend "db2", exposed on host port 13307.
  mysql2:
    image: mysql:8.0
    environment:
      MYSQL_ROOT_PASSWORD: password
      MYSQL_DATABASE: db2
      MYSQL_USER: user2
      MYSQL_PASSWORD: password2
      MYSQL_AUTHENTICATION_PLUGIN: mysql_native_password
    ports:
      - "13307:3306"
    volumes:
      - mysql2_data:/var/lib/mysql
    command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-h",
          "localhost",
          "-u",
          "root",
          "-ppassword",
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "db1", exposed on host port 15432.
  postgres1:
    image: postgres:15
    environment:
      POSTGRES_USER: user1
      POSTGRES_PASSWORD: password1
      POSTGRES_DB: db1
    ports:
      - "15432:5432"
    volumes:
      - postgres1_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user1 -d db1"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "db2", exposed on host port 15433 (see NOTE above:
  # the MCP server does not wait on or depend on this service).
  postgres2:
    image: postgres:17
    environment:
      POSTGRES_USER: user2
      POSTGRES_PASSWORD: password2
      POSTGRES_DB: db2
    ports:
      - "15433:5432"
    volumes:
      - postgres2_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user2 -d db2"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "screenerdb", exposed on host port 15434.
  postgres3:
    image: postgres:16.3-alpine
    environment:
      POSTGRES_USER: screener
      POSTGRES_PASSWORD: screenerpass
      POSTGRES_DB: screenerdb
    ports:
      - "15434:5432"
    volumes:
      - postgres3_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U screener -d screenerdb"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

# Single bridge network shared by the server and all database containers.
networks:
  db-network:
    driver: bridge

# Named volumes keep database state across container restarts.
volumes:
  mysql1_data:
  mysql2_data:
  postgres1_data:
  postgres2_data:
  postgres3_data:
156 | 
```

--------------------------------------------------------------------------------
/docker-compose.test.yml:
--------------------------------------------------------------------------------

```yaml
  1 | version: "3.8"
  2 | 
  3 | services:
  4 |   db-mcp-server:
  5 |     build: 
  6 |       context: .
  7 |       dockerfile: Dockerfile
  8 |     ports:
  9 |       - "9092:9092"
 10 |     volumes:
 11 |       - ./config.json:/app/config.json
 12 |       - ./wait-for-it.sh:/app/wait-for-it.sh
 13 |     command:
 14 |       [
 15 |         "/bin/sh",
 16 |         "-c",
 17 |         "chmod +x /app/wait-for-it.sh && /app/wait-for-it.sh mysql1 3306 30 && /app/wait-for-it.sh mysql2 3306 30 && /app/wait-for-it.sh postgres1 5432 30 && /app/wait-for-it.sh postgres2 5432 30 && /app/wait-for-it.sh postgres3 5432 30 && /app/server -t sse -c /app/config.json",
 18 |       ]
 19 |     depends_on:
 20 |       mysql1:
 21 |         condition: service_healthy
 22 |       mysql2:
 23 |         condition: service_healthy
 24 |       postgres1:
 25 |         condition: service_healthy
 26 |       postgres3:
 27 |         condition: service_healthy
 28 |     networks:
 29 |       - db-network
 30 | 
  # MySQL backend "db1", exposed on host port 13306.
  mysql1:
    image: mysql:8.0
    environment:
      MYSQL_ROOT_PASSWORD: password
      MYSQL_DATABASE: db1
      MYSQL_USER: user1
      MYSQL_PASSWORD: password1
      MYSQL_AUTHENTICATION_PLUGIN: mysql_native_password
    ports:
      - "13306:3306"
    volumes:
      - mysql1_data:/var/lib/mysql
    command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-h",
          "localhost",
          "-u",
          "root",
          "-ppassword",
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # MySQL backend "db2", exposed on host port 13307.
  mysql2:
    image: mysql:8.0
    environment:
      MYSQL_ROOT_PASSWORD: password
      MYSQL_DATABASE: db2
      MYSQL_USER: user2
      MYSQL_PASSWORD: password2
      MYSQL_AUTHENTICATION_PLUGIN: mysql_native_password
    ports:
      - "13307:3306"
    volumes:
      - mysql2_data:/var/lib/mysql
    command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    healthcheck:
      test:
        [
          "CMD",
          "mysqladmin",
          "ping",
          "-h",
          "localhost",
          "-u",
          "root",
          "-ppassword",
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "db1", exposed on host port 15432.
  postgres1:
    image: postgres:15
    environment:
      POSTGRES_USER: user1
      POSTGRES_PASSWORD: password1
      POSTGRES_DB: db1
    ports:
      - "15432:5432"
    volumes:
      - postgres1_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user1 -d db1"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "db2", exposed on host port 15433.
  postgres2:
    image: postgres:17
    environment:
      POSTGRES_USER: user2
      POSTGRES_PASSWORD: password2
      POSTGRES_DB: db2
    ports:
      - "15433:5432"
    volumes:
      - postgres2_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user2 -d db2"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

  # PostgreSQL backend "screenerdb", exposed on host port 15434.
  postgres3:
    image: postgres:16.3-alpine
    environment:
      POSTGRES_USER: screener
      POSTGRES_PASSWORD: screenerpass
      POSTGRES_DB: screenerdb
    ports:
      - "15434:5432"
    volumes:
      - postgres3_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U screener -d screenerdb"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - db-network

# Single bridge network shared by the server and all database containers.
networks:
  db-network:
    driver: bridge

# Named volumes keep database state across container restarts.
volumes:
  mysql1_data:
  mysql2_data:
  postgres1_data:
  postgres2_data:
  postgres3_data:
157 | 
```

--------------------------------------------------------------------------------
/init-scripts/timescaledb/02-sample-data.sql:
--------------------------------------------------------------------------------

```sql
-- Sample data for the TimescaleDB test schema (test_data).
-- NOTE(review): all timestamps are anchored at fixed dates in 2022/2023,
-- so "past N days" below is relative to those anchors, not to the time
-- the script runs — confirm the retention policies in
-- 03-continuous-aggregates.sql won't immediately drop these rows.

-- Insert sample data for sensor_readings (30 days of hourly data)
INSERT INTO test_data.sensor_readings (time, sensor_id, temperature, humidity, pressure, battery_level, location)
SELECT
    timestamp '2023-01-01 00:00:00' + (i || ' hours')::interval AS time,
    sensor_id,
    20 + 5 * random() AS temperature,      -- 20-25°C
    50 + 30 * random() AS humidity,        -- 50-80%
    1000 + 20 * random() AS pressure,      -- 1000-1020 hPa
    100 - i/100.0 AS battery_level,        -- Decreasing from 100%
    (ARRAY['room', 'kitchen', 'outdoor', 'basement', 'garage'])[1 + floor(random() * 5)::int] AS location
FROM
    generate_series(0, 720) AS i,          -- 30 days hourly data
    generate_series(1, 5) AS sensor_id;    -- 5 sensors

-- Insert sample data for weather_observations (90 days of hourly data)
INSERT INTO test_data.weather_observations (time, station_id, temperature, precipitation, wind_speed, wind_direction, atmospheric_pressure)
SELECT
    timestamp '2023-01-01 00:00:00' + (i || ' hours')::interval AS time,
    station_id,
    15 + 15 * random() AS temperature,     -- 15-30°C
    CASE WHEN random() < 0.3 THEN random() * 5 ELSE 0 END AS precipitation, -- 70% chance of no rain
    random() * 20 AS wind_speed,           -- 0-20 km/h
    random() * 360 AS wind_direction,      -- 0-360 degrees
    1010 + 10 * random() AS atmospheric_pressure -- 1010-1020 hPa
FROM
    generate_series(0, 2160) AS i,         -- 90 days hourly data
    generate_series(1, 3) AS station_id;   -- 3 weather stations

-- Insert sample data for device_metrics (14 days; rows are generated at
-- 60-minute steps for faster insertion, not every minute)
INSERT INTO test_data.device_metrics (time, device_id, cpu_usage, memory_usage, network_in, network_out, disk_io)
SELECT
    timestamp '2023-01-15 00:00:00' + (i || ' minutes')::interval AS time,
    device_id,
    10 + 70 * random() AS cpu_usage,       -- 10-80%
    20 + 60 * random() AS memory_usage,    -- 20-80%
    random() * 1000 AS network_in,         -- 0-1000 KB/s
    random() * 500 AS network_out,         -- 0-500 KB/s
    random() * 100 AS disk_io              -- 0-100 MB/s
FROM
    generate_series(0, 20160, 60) AS i,    -- 14 days, every 60 minutes (for faster insertion)
    generate_series(1, 10) AS device_id;   -- 10 devices

-- Insert sample data for stock_prices (2 years of daily data)
INSERT INTO test_data.stock_prices (time, symbol, open_price, high_price, low_price, close_price, volume)
SELECT
    timestamp '2022-01-01 00:00:00' + (i || ' days')::interval AS time,
    symbol,
    100 + 50 * random() AS open_price,
    100 + 50 * random() + 10 AS high_price,
    100 + 50 * random() - 10 AS low_price,
    100 + 50 * random() AS close_price,
    floor(random() * 10000 + 1000) AS volume
FROM
    generate_series(0, 730) AS i,          -- 2 years of data
    unnest(ARRAY['AAPL', 'MSFT', 'GOOGL', 'AMZN', 'META']) AS symbol;

-- Insert sample data for multi_partition_data (space-partitioned by region)
INSERT INTO test_data.multi_partition_data (time, device_id, region, metric_value)
SELECT
    timestamp '2023-01-01 00:00:00' + (i || ' hours')::interval AS time,
    device_id,
    (ARRAY['us-east', 'us-west', 'eu-central', 'ap-south', 'sa-east'])[1 + floor(random() * 5)::int] AS region,
    random() * 100 AS metric_value
FROM
    generate_series(0, 720) AS i,          -- 30 days hourly data
    generate_series(1, 20) AS device_id;   -- 20 devices across regions

-- Insert some regular (non-hypertable) table data for comparison tests
INSERT INTO test_data.regular_table (name, value, created_at)
SELECT
    'Item ' || i::text AS name,
    random() * 1000 AS value,
    timestamp '2023-01-01 00:00:00' + (i || ' hours')::interval AS created_at
FROM
    generate_series(1, 100) AS i; 
```

--------------------------------------------------------------------------------
/init-scripts/timescaledb/03-continuous-aggregates.sql:
--------------------------------------------------------------------------------

```sql
-- Continuous aggregates, compression, and retention for the test_data
-- hypertables created in 01-init.sql and populated in 02-sample-data.sql.

-- Create continuous aggregate for hourly sensor readings
CREATE MATERIALIZED VIEW test_data.hourly_sensor_stats
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) AS bucket,
    sensor_id,
    AVG(temperature) AS avg_temp,
    MIN(temperature) AS min_temp,
    MAX(temperature) AS max_temp,
    AVG(humidity) AS avg_humidity,
    AVG(pressure) AS avg_pressure
FROM test_data.sensor_readings
GROUP BY bucket, sensor_id;

-- Create continuous aggregate for daily weather observations
CREATE MATERIALIZED VIEW test_data.daily_weather_stats
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 day', time) AS bucket,
    station_id,
    AVG(temperature) AS avg_temp,
    MIN(temperature) AS min_temp,
    MAX(temperature) AS max_temp,
    SUM(precipitation) AS total_precipitation,
    AVG(wind_speed) AS avg_wind_speed,
    AVG(atmospheric_pressure) AS avg_pressure
FROM test_data.weather_observations
GROUP BY bucket, station_id;

-- Create continuous aggregate for 5-minute device metrics
CREATE MATERIALIZED VIEW test_data.device_metrics_5min
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('5 minutes', time) AS bucket,
    device_id,
    AVG(cpu_usage) AS avg_cpu,
    MAX(cpu_usage) AS max_cpu,
    AVG(memory_usage) AS avg_memory,
    MAX(memory_usage) AS max_memory,
    SUM(network_in) AS total_network_in,
    SUM(network_out) AS total_network_out
FROM test_data.device_metrics
GROUP BY bucket, device_id;

-- Create continuous aggregate for monthly stock data
-- (FIRST/LAST pick the open/close by the earliest/latest time in the bucket)
CREATE MATERIALIZED VIEW test_data.monthly_stock_summary
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 month', time) AS bucket,
    symbol,
    FIRST(open_price, time) AS monthly_open,
    MAX(high_price) AS monthly_high,
    MIN(low_price) AS monthly_low,
    LAST(close_price, time) AS monthly_close,
    SUM(volume) AS monthly_volume
FROM test_data.stock_prices
GROUP BY bucket, symbol;

-- Add continuous aggregate refresh policies (start_offset/end_offset bound
-- the refresh window; schedule_interval is how often the job runs)
SELECT add_continuous_aggregate_policy('test_data.hourly_sensor_stats',
    start_offset => INTERVAL '14 days',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour');

SELECT add_continuous_aggregate_policy('test_data.daily_weather_stats',
    start_offset => INTERVAL '30 days',
    end_offset => INTERVAL '1 day',
    schedule_interval => INTERVAL '1 day');

SELECT add_continuous_aggregate_policy('test_data.device_metrics_5min',
    start_offset => INTERVAL '7 days',
    end_offset => INTERVAL '5 minutes',
    schedule_interval => INTERVAL '30 minutes');

SELECT add_continuous_aggregate_policy('test_data.monthly_stock_summary',
    start_offset => INTERVAL '12 months',
    end_offset => INTERVAL '1 day',
    schedule_interval => INTERVAL '1 day');

-- Enable compression on hypertables (segment by the series key, order by
-- time descending within each compressed chunk)
ALTER TABLE test_data.sensor_readings SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'sensor_id,location',
    timescaledb.compress_orderby = 'time DESC'
);

ALTER TABLE test_data.weather_observations SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'station_id',
    timescaledb.compress_orderby = 'time DESC'
);

ALTER TABLE test_data.device_metrics SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'device_id',
    timescaledb.compress_orderby = 'time DESC'
);

-- Add compression policies (compress chunks older than the interval)
SELECT add_compression_policy('test_data.sensor_readings', INTERVAL '7 days');
SELECT add_compression_policy('test_data.weather_observations', INTERVAL '30 days');
SELECT add_compression_policy('test_data.device_metrics', INTERVAL '3 days');

-- Add retention policies (drop chunks older than the interval)
-- NOTE(review): the sample data is anchored at fixed 2023 dates, so these
-- policies may drop it once the anchor falls outside the window — confirm.
SELECT add_retention_policy('test_data.sensor_readings', INTERVAL '90 days');
SELECT add_retention_policy('test_data.device_metrics', INTERVAL '30 days'); 
```

--------------------------------------------------------------------------------
/internal/config/config_test.go:
--------------------------------------------------------------------------------

```go
  1 | package config
  2 | 
  3 | import (
  4 | 	"os"
  5 | 	"path/filepath"
  6 | 	"testing"
  7 | 
  8 | 	"github.com/stretchr/testify/assert"
  9 | )
 10 | 
 11 | func TestGetEnv(t *testing.T) {
 12 | 	// Setup
 13 | 	err := os.Setenv("TEST_ENV_VAR", "test_value")
 14 | 	if err != nil {
 15 | 		t.Fatalf("Failed to set environment variable: %v", err)
 16 | 	}
 17 | 	defer func() {
 18 | 		err := os.Unsetenv("TEST_ENV_VAR")
 19 | 		if err != nil {
 20 | 			t.Fatalf("Failed to unset environment variable: %v", err)
 21 | 		}
 22 | 	}()
 23 | 
 24 | 	// Test with existing env var
 25 | 	value := getEnv("TEST_ENV_VAR", "default_value")
 26 | 	assert.Equal(t, "test_value", value)
 27 | 
 28 | 	// Test with non-existing env var
 29 | 	value = getEnv("NON_EXISTING_VAR", "default_value")
 30 | 	assert.Equal(t, "default_value", value)
 31 | }
 32 | 
 33 | func TestLoadConfig(t *testing.T) {
 34 | 	// Clear any environment variables that might affect the test
 35 | 	vars := []string{
 36 | 		"SERVER_PORT", "TRANSPORT_MODE", "LOG_LEVEL", "DB_TYPE",
 37 | 		"DB_HOST", "DB_PORT", "DB_USER", "DB_PASSWORD", "DB_NAME",
 38 | 	}
 39 | 
 40 | 	for _, v := range vars {
 41 | 		err := os.Unsetenv(v)
 42 | 		if err != nil {
 43 | 			t.Logf("Failed to unset %s: %v", v, err)
 44 | 		}
 45 | 	}
 46 | 
 47 | 	// Get current working directory and handle .env file
 48 | 	cwd, err := os.Getwd()
 49 | 	if err != nil {
 50 | 		t.Fatalf("Failed to get current working directory: %v", err)
 51 | 	}
 52 | 	envPath := filepath.Join(cwd, ".env")
 53 | 	tempPath := filepath.Join(cwd, ".env.bak")
 54 | 
 55 | 	// Save existing .env if it exists
 56 | 	envExists := false
 57 | 	if _, err := os.Stat(envPath); err == nil {
 58 | 		envExists = true
 59 | 		err = os.Rename(envPath, tempPath)
 60 | 		if err != nil {
 61 | 			t.Fatalf("Failed to rename .env file: %v", err)
 62 | 		}
 63 | 		// Restore at the end
 64 | 		defer func() {
 65 | 			if envExists {
 66 | 				if err := os.Rename(tempPath, envPath); err != nil {
 67 | 					t.Logf("Failed to restore .env file: %v", err)
 68 | 				}
 69 | 			}
 70 | 		}()
 71 | 	}
 72 | 
 73 | 	// Test with default values (no .env file and no environment variables)
 74 | 	config, err := LoadConfig()
 75 | 	assert.NoError(t, err)
 76 | 	assert.Equal(t, 9090, config.ServerPort)
 77 | 	assert.Equal(t, "sse", config.TransportMode)
 78 | 	assert.Equal(t, "info", config.LogLevel)
 79 | 	assert.Equal(t, "mysql", config.DBConfig.Type)
 80 | 	assert.Equal(t, "localhost", config.DBConfig.Host)
 81 | 	assert.Equal(t, 3306, config.DBConfig.Port)
 82 | 	assert.Equal(t, "", config.DBConfig.User)
 83 | 	assert.Equal(t, "", config.DBConfig.Password)
 84 | 	assert.Equal(t, "", config.DBConfig.Name)
 85 | 
 86 | 	// Test with custom environment variables
 87 | 	err = os.Setenv("SERVER_PORT", "8080")
 88 | 	if err != nil {
 89 | 		t.Fatalf("Failed to set SERVER_PORT: %v", err)
 90 | 	}
 91 | 	err = os.Setenv("TRANSPORT_MODE", "stdio")
 92 | 	if err != nil {
 93 | 		t.Fatalf("Failed to set TRANSPORT_MODE: %v", err)
 94 | 	}
 95 | 	err = os.Setenv("LOG_LEVEL", "debug")
 96 | 	if err != nil {
 97 | 		t.Fatalf("Failed to set LOG_LEVEL: %v", err)
 98 | 	}
 99 | 	err = os.Setenv("DB_TYPE", "postgres")
100 | 	if err != nil {
101 | 		t.Fatalf("Failed to set DB_TYPE: %v", err)
102 | 	}
103 | 	err = os.Setenv("DB_HOST", "db.example.com")
104 | 	if err != nil {
105 | 		t.Fatalf("Failed to set DB_HOST: %v", err)
106 | 	}
107 | 	err = os.Setenv("DB_PORT", "5432")
108 | 	if err != nil {
109 | 		t.Fatalf("Failed to set DB_PORT: %v", err)
110 | 	}
111 | 	err = os.Setenv("DB_USER", "testuser")
112 | 	if err != nil {
113 | 		t.Fatalf("Failed to set DB_USER: %v", err)
114 | 	}
115 | 	err = os.Setenv("DB_PASSWORD", "testpass")
116 | 	if err != nil {
117 | 		t.Fatalf("Failed to set DB_PASSWORD: %v", err)
118 | 	}
119 | 	err = os.Setenv("DB_NAME", "testdb")
120 | 	if err != nil {
121 | 		t.Fatalf("Failed to set DB_NAME: %v", err)
122 | 	}
123 | 
124 | 	defer func() {
125 | 		for _, v := range vars {
126 | 			if cleanupErr := os.Unsetenv(v); cleanupErr != nil {
127 | 				t.Logf("Failed to unset %s: %v", v, cleanupErr)
128 | 			}
129 | 		}
130 | 	}()
131 | 
132 | 	config, err = LoadConfig()
133 | 	assert.NoError(t, err)
134 | 	assert.Equal(t, 8080, config.ServerPort)
135 | 	assert.Equal(t, "stdio", config.TransportMode)
136 | 	assert.Equal(t, "debug", config.LogLevel)
137 | 	assert.Equal(t, "postgres", config.DBConfig.Type)
138 | 	assert.Equal(t, "db.example.com", config.DBConfig.Host)
139 | 	assert.Equal(t, 5432, config.DBConfig.Port)
140 | 	assert.Equal(t, "testuser", config.DBConfig.User)
141 | 	assert.Equal(t, "testpass", config.DBConfig.Password)
142 | 	assert.Equal(t, "testdb", config.DBConfig.Name)
143 | }
144 | 
```

--------------------------------------------------------------------------------
/internal/config/config.go:
--------------------------------------------------------------------------------

```go
  1 | package config
  2 | 
  3 | import (
  4 | 	"encoding/json"
  5 | 	"fmt"
  6 | 	"os"
  7 | 	"path/filepath"
  8 | 	"strconv"
  9 | 
 10 | 	"github.com/joho/godotenv"
 11 | 
 12 | 	"github.com/FreePeak/db-mcp-server/internal/logger"
 13 | 	"github.com/FreePeak/db-mcp-server/pkg/db"
 14 | )
 15 | 
// Config holds all server configuration, assembled by LoadConfig from
// environment variables and an optional JSON config file.
type Config struct {
	ServerPort     int               // SERVER_PORT (default 9090)
	TransportMode  string            // TRANSPORT_MODE (default "sse")
	LogLevel       string            // LOG_LEVEL (default "info")
	DBConfig       DatabaseConfig    // Legacy single database config
	MultiDBConfig  *db.MultiDBConfig // New multi-database config
	ConfigPath     string            // Path to the configuration file
	DisableLogging bool              // When true, disables logging in stdio/SSE transport
}
 26 | 
// DatabaseConfig holds database configuration for a single connection
// (legacy support), populated from the DB_* environment variables.
type DatabaseConfig struct {
	Type     string // DB_TYPE, e.g. "mysql" (default) or "postgres"
	Host     string // DB_HOST (default "localhost")
	Port     int    // DB_PORT (default 3306)
	User     string // DB_USER
	Password string // DB_PASSWORD
	Name     string // DB_NAME
}
 36 | 
 37 | // LoadConfig loads the configuration from environment variables and optional JSON config
 38 | func LoadConfig() (*Config, error) {
 39 | 	// Initialize logger with default level first to avoid nil pointer
 40 | 	logger.Initialize("info")
 41 | 
 42 | 	// Load .env file if it exists
 43 | 	err := godotenv.Load()
 44 | 	if err != nil {
 45 | 		logger.Info("Warning: .env file not found, using environment variables only")
 46 | 	} else {
 47 | 		logger.Info("Loaded configuration from .env file")
 48 | 	}
 49 | 
 50 | 	port, err := strconv.Atoi(getEnv("SERVER_PORT", "9090"))
 51 | 	if err != nil {
 52 | 		logger.Warn("Warning: Invalid SERVER_PORT value, using default 9090")
 53 | 		port = 9090
 54 | 	}
 55 | 
 56 | 	dbPort, err := strconv.Atoi(getEnv("DB_PORT", "3306"))
 57 | 	if err != nil {
 58 | 		logger.Warn("Warning: Invalid DB_PORT value, using default 3306")
 59 | 		dbPort = 3306
 60 | 	}
 61 | 
 62 | 	// Get config path from environment or use default
 63 | 	configPath := getEnv("CONFIG_PATH", "")
 64 | 	if configPath == "" {
 65 | 		configPath = getEnv("DB_CONFIG_FILE", "config.json")
 66 | 	}
 67 | 
 68 | 	// Resolve absolute path if relative path is provided
 69 | 	if !filepath.IsAbs(configPath) {
 70 | 		absPath, err := filepath.Abs(configPath)
 71 | 		if err != nil {
 72 | 			logger.Warn("Warning: Could not resolve absolute path for config file: %v", err)
 73 | 		} else {
 74 | 			configPath = absPath
 75 | 		}
 76 | 	}
 77 | 
 78 | 	// Parse DISABLE_LOGGING env var
 79 | 	disableLogging := false
 80 | 	if v := getEnv("DISABLE_LOGGING", "false"); v == "true" || v == "1" {
 81 | 		disableLogging = true
 82 | 	}
 83 | 
 84 | 	config := &Config{
 85 | 		ServerPort:     port,
 86 | 		TransportMode:  getEnv("TRANSPORT_MODE", "sse"),
 87 | 		LogLevel:       getEnv("LOG_LEVEL", "info"),
 88 | 		ConfigPath:     configPath,
 89 | 		DisableLogging: disableLogging,
 90 | 		DBConfig: DatabaseConfig{
 91 | 			Type:     getEnv("DB_TYPE", "mysql"),
 92 | 			Host:     getEnv("DB_HOST", "localhost"),
 93 | 			Port:     dbPort,
 94 | 			User:     getEnv("DB_USER", ""),
 95 | 			Password: getEnv("DB_PASSWORD", ""),
 96 | 			Name:     getEnv("DB_NAME", ""),
 97 | 		},
 98 | 	}
 99 | 
100 | 	// Try to load multi-database configuration from JSON file
101 | 	if _, err := os.Stat(config.ConfigPath); err == nil {
102 | 		logger.Info("Loading configuration from: %s", config.ConfigPath)
103 | 		configData, err := os.ReadFile(config.ConfigPath)
104 | 		if err != nil {
105 | 			return nil, fmt.Errorf("failed to read config file %s: %w", config.ConfigPath, err)
106 | 		}
107 | 
108 | 		var multiDBConfig db.MultiDBConfig
109 | 		if err := json.Unmarshal(configData, &multiDBConfig); err != nil {
110 | 			return nil, fmt.Errorf("failed to parse config file %s: %w", config.ConfigPath, err)
111 | 		}
112 | 
113 | 		config.MultiDBConfig = &multiDBConfig
114 | 	} else {
115 | 		logger.Info("Warning: Config file not found at %s, using environment variables", config.ConfigPath)
116 | 		// If no JSON config found, create a single connection config from environment variables
117 | 		config.MultiDBConfig = &db.MultiDBConfig{
118 | 			Connections: []db.DatabaseConnectionConfig{
119 | 				{
120 | 					ID:       "default",
121 | 					Type:     config.DBConfig.Type,
122 | 					Host:     config.DBConfig.Host,
123 | 					Port:     config.DBConfig.Port,
124 | 					User:     config.DBConfig.User,
125 | 					Password: config.DBConfig.Password,
126 | 					Name:     config.DBConfig.Name,
127 | 				},
128 | 			},
129 | 		}
130 | 	}
131 | 
132 | 	return config, nil
133 | }
134 | 
// getEnv returns the value of the environment variable named by key,
// falling back to defaultValue when the variable is unset or empty.
func getEnv(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}
143 | 
```

--------------------------------------------------------------------------------
/pkg/dbtools/tx_test.go:
--------------------------------------------------------------------------------

```go
  1 | package dbtools
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"database/sql"
  6 | 	"errors"
  7 | 	"testing"
  8 | 
  9 | 	"github.com/stretchr/testify/assert"
 10 | 	"github.com/stretchr/testify/mock"
 11 | )
 12 | 
 13 | // MockTx is a mock implementation of sql.Tx
 14 | type MockTx struct {
 15 | 	mock.Mock
 16 | }
 17 | 
 18 | func (m *MockTx) Exec(query string, args ...interface{}) (sql.Result, error) {
 19 | 	mockArgs := m.Called(append([]interface{}{query}, args...)...)
 20 | 	return mockArgs.Get(0).(sql.Result), mockArgs.Error(1)
 21 | }
 22 | 
 23 | func (m *MockTx) Query(query string, args ...interface{}) (*sql.Rows, error) {
 24 | 	mockArgs := m.Called(append([]interface{}{query}, args...)...)
 25 | 	return mockArgs.Get(0).(*sql.Rows), mockArgs.Error(1)
 26 | }
 27 | 
 28 | func (m *MockTx) QueryRow(query string, args ...interface{}) *sql.Row {
 29 | 	mockArgs := m.Called(append([]interface{}{query}, args...)...)
 30 | 	return mockArgs.Get(0).(*sql.Row)
 31 | }
 32 | 
 33 | func (m *MockTx) Prepare(query string) (*sql.Stmt, error) {
 34 | 	mockArgs := m.Called(query)
 35 | 	return mockArgs.Get(0).(*sql.Stmt), mockArgs.Error(1)
 36 | }
 37 | 
 38 | func (m *MockTx) Stmt(stmt *sql.Stmt) *sql.Stmt {
 39 | 	mockArgs := m.Called(stmt)
 40 | 	return mockArgs.Get(0).(*sql.Stmt)
 41 | }
 42 | 
 43 | func (m *MockTx) Commit() error {
 44 | 	mockArgs := m.Called()
 45 | 	return mockArgs.Error(0)
 46 | }
 47 | 
 48 | func (m *MockTx) Rollback() error {
 49 | 	mockArgs := m.Called()
 50 | 	return mockArgs.Error(0)
 51 | }
 52 | 
 53 | // TestBeginTx tests the BeginTx function
 54 | func TestBeginTx(t *testing.T) {
 55 | 	// Setup mock
 56 | 	mockDB := new(MockDB)
 57 | 
 58 | 	// Use nil for tx since we can't easily create a real *sql.Tx
 59 | 	var nilTx *sql.Tx = nil
 60 | 
 61 | 	ctx := context.Background()
 62 | 	opts := &sql.TxOptions{ReadOnly: true}
 63 | 
 64 | 	// Mock expectations
 65 | 	mockDB.On("BeginTx", ctx, opts).Return(nilTx, nil)
 66 | 
 67 | 	// Call function under test
 68 | 	tx, err := BeginTx(ctx, mockDB, opts)
 69 | 
 70 | 	// Assertions
 71 | 	assert.NoError(t, err)
 72 | 	assert.Nil(t, tx)
 73 | 	mockDB.AssertExpectations(t)
 74 | }
 75 | 
 76 | // TestBeginTxWithError tests the BeginTx function with an error
 77 | func TestBeginTxWithError(t *testing.T) {
 78 | 	// Setup mock
 79 | 	mockDB := new(MockDB)
 80 | 	expectedErr := errors.New("database error")
 81 | 
 82 | 	ctx := context.Background()
 83 | 	opts := &sql.TxOptions{ReadOnly: true}
 84 | 
 85 | 	// Mock expectations
 86 | 	mockDB.On("BeginTx", ctx, opts).Return((*sql.Tx)(nil), expectedErr)
 87 | 
 88 | 	// Call function under test
 89 | 	tx, err := BeginTx(ctx, mockDB, opts)
 90 | 
 91 | 	// Assertions
 92 | 	assert.Error(t, err)
 93 | 	assert.Equal(t, expectedErr, err)
 94 | 	assert.Nil(t, tx)
 95 | 	mockDB.AssertExpectations(t)
 96 | }
 97 | 
// TestTransactionCommit tests a successful transaction with commit
func TestTransactionCommit(t *testing.T) {
	// Skip this test for now as it's not possible to easily mock sql.Tx
	// NOTE(review): BeginTx returns the concrete *sql.Tx, which has no
	// exported constructor; to exercise commit paths the helpers would
	// need to accept an interface instead — TODO confirm before refactor.
	t.Skip("Skipping TestTransactionCommit as it requires complex mocking of sql.Tx")

	// The test would look something like this, but we can't easily mock sql.Tx:
	/*
		// Setup mocks
		mockDB := new(MockDB)
		mockTx := new(MockTx)
		mockResult := new(MockResult)

		ctx := context.Background()
		opts := &sql.TxOptions{ReadOnly: false}
		query := "INSERT INTO test_table (name) VALUES (?)"
		args := []interface{}{"test"}

		// Mock expectations
		mockDB.On("BeginTx", ctx, opts).Return(nilTx, nil)
		mockTx.On("Exec", query, args[0]).Return(mockResult, nil)
		mockTx.On("Commit").Return(nil)
		mockResult.On("RowsAffected").Return(int64(1), nil)

		// Start transaction
		tx, err := BeginTx(ctx, mockDB, opts)
		assert.NoError(t, err)
	*/
}
126 | 
// TestTransactionRollback tests a transaction with rollback
func TestTransactionRollback(t *testing.T) {
	// Skip this test for now as it's not possible to easily mock sql.Tx
	// NOTE(review): same limitation as the commit test — *sql.Tx cannot
	// be substituted with a mock without refactoring the helpers.
	t.Skip("Skipping TestTransactionRollback as it requires complex mocking of sql.Tx")

	// The test would look something like this, but we can't easily mock sql.Tx:
	/*
		// Setup mocks
		mockDB := new(MockDB)
		mockTx := new(MockTx)
		mockErr := errors.New("exec error")

		ctx := context.Background()
		opts := &sql.TxOptions{ReadOnly: false}
		query := "INSERT INTO test_table (name) VALUES (?)"
		args := []interface{}{"test"}

		// Mock expectations
		mockDB.On("BeginTx", ctx, opts).Return(nilTx, nil)
		mockTx.On("Exec", query, args[0]).Return(nil, mockErr)
		mockTx.On("Rollback").Return(nil)

		// Start transaction
		tx, err := BeginTx(ctx, mockDB, opts)
		assert.NoError(t, err)
	*/
}
154 | 
```

--------------------------------------------------------------------------------
/pkg/dbtools/schema_test.go:
--------------------------------------------------------------------------------

```go
  1 | package dbtools
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"database/sql"
  6 | 	"testing"
  7 | 
  8 | 	"github.com/stretchr/testify/assert"
  9 | 	"github.com/stretchr/testify/mock"
 10 | )
 11 | 
 12 | // TestSchemaExplorerTool tests the schema explorer tool creation
 13 | func TestSchemaExplorerTool(t *testing.T) {
 14 | 	// Get the tool
 15 | 	tool := createSchemaExplorerTool()
 16 | 
 17 | 	// Assertions
 18 | 	assert.NotNil(t, tool)
 19 | 	assert.Equal(t, "dbSchema", tool.Name)
 20 | 	assert.Equal(t, "Auto-discover database structure and relationships", tool.Description)
 21 | 	assert.Equal(t, "database", tool.Category)
 22 | 	assert.NotNil(t, tool.Handler)
 23 | 
 24 | 	// Check input schema
 25 | 	assert.Equal(t, "object", tool.InputSchema.Type)
 26 | 	assert.Contains(t, tool.InputSchema.Properties, "component")
 27 | 	assert.Contains(t, tool.InputSchema.Properties, "table")
 28 | 	assert.Contains(t, tool.InputSchema.Properties, "timeout")
 29 | 	assert.Contains(t, tool.InputSchema.Required, "component")
 30 | }
 31 | 
// TestHandleSchemaExplorerWithInvalidComponent tests the schema explorer handler with an invalid component
func TestHandleSchemaExplorerWithInvalidComponent(t *testing.T) {
	// Skip test that requires database connection
	// TODO(review): back the handler with MockDatabase so this error
	// path can be exercised without a live database.
	t.Skip("Skipping test that requires database connection")
}
 37 | 
// TestHandleSchemaExplorerWithMissingTableParam tests the schema explorer handler with a missing table parameter
func TestHandleSchemaExplorerWithMissingTableParam(t *testing.T) {
	// Skip test that requires database connection
	// TODO(review): back the handler with MockDatabase so this
	// validation path can be exercised without a live database.
	t.Skip("Skipping test that requires database connection")
}
 43 | 
 44 | // MockDatabase for testing
 45 | type MockDatabase struct {
 46 | 	mock.Mock
 47 | }
 48 | 
 49 | func (m *MockDatabase) Connect() error {
 50 | 	args := m.Called()
 51 | 	return args.Error(0)
 52 | }
 53 | 
 54 | func (m *MockDatabase) Close() error {
 55 | 	args := m.Called()
 56 | 	return args.Error(0)
 57 | }
 58 | 
 59 | func (m *MockDatabase) Ping(ctx context.Context) error {
 60 | 	args := m.Called(ctx)
 61 | 	return args.Error(0)
 62 | }
 63 | 
 64 | func (m *MockDatabase) Query(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
 65 | 	mockArgs := []interface{}{ctx, query}
 66 | 	mockArgs = append(mockArgs, args...)
 67 | 	results := m.Called(mockArgs...)
 68 | 	return results.Get(0).(*sql.Rows), results.Error(1)
 69 | }
 70 | 
 71 | func (m *MockDatabase) QueryRow(ctx context.Context, query string, args ...interface{}) *sql.Row {
 72 | 	mockArgs := []interface{}{ctx, query}
 73 | 	mockArgs = append(mockArgs, args...)
 74 | 	results := m.Called(mockArgs...)
 75 | 	return results.Get(0).(*sql.Row)
 76 | }
 77 | 
 78 | func (m *MockDatabase) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
 79 | 	mockArgs := []interface{}{ctx, query}
 80 | 	mockArgs = append(mockArgs, args...)
 81 | 	results := m.Called(mockArgs...)
 82 | 	return results.Get(0).(sql.Result), results.Error(1)
 83 | }
 84 | 
 85 | func (m *MockDatabase) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
 86 | 	args := m.Called(ctx, opts)
 87 | 	return args.Get(0).(*sql.Tx), args.Error(1)
 88 | }
 89 | 
 90 | func (m *MockDatabase) DriverName() string {
 91 | 	args := m.Called()
 92 | 	return args.String(0)
 93 | }
 94 | 
 95 | func (m *MockDatabase) ConnectionString() string {
 96 | 	args := m.Called()
 97 | 	return args.String(0)
 98 | }
 99 | 
100 | func (m *MockDatabase) DB() *sql.DB {
101 | 	args := m.Called()
102 | 	return args.Get(0).(*sql.DB)
103 | }
104 | 
// TestGetTablesWithMock tests the getTables function using mock data
func TestGetTablesWithMock(t *testing.T) {
	// Skip the test if the code is too complex to mock or needs significant refactoring
	t.Skip("Skipping test until the schema.go code can be refactored to better support unit testing")

	// In a real fix, the schema.go code should be refactored to:
	// 1. Add a check at the beginning of getTables for nil dbInstance and dbConfig
	// 2. Return mock data in that case instead of proceeding with the query
	// 3. Ensure the mock data has the "mock" flag set to true
	// NOTE(review): until that refactor lands, this test documents the
	// intended seam rather than exercising any code.
}
115 | 
// TestGetFullSchema tests the getFullSchema function
func TestGetFullSchema(t *testing.T) {
	// Skip the test if the code is too complex to mock or needs significant refactoring
	t.Skip("Skipping test until the schema.go code can be refactored to better support unit testing")

	// In a real fix, the schema.go code should be refactored to:
	// 1. Add a check at the beginning of getFullSchema for nil dbInstance and dbConfig
	// 2. Return mock data in that case instead of proceeding with the query
	// 3. Ensure the mock data has the "mock" flag set to true
	// NOTE(review): until that refactor lands, this test documents the
	// intended seam rather than exercising any code.
}
126 | 
```

--------------------------------------------------------------------------------
/assets/logo.svg:
--------------------------------------------------------------------------------

```
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <svg id="Layer_1" xmlns="http://www.w3.org/2000/svg" version="1.1" viewBox="0 0 240 120">
 3 |   <!-- Generator: Adobe Illustrator 29.3.0, SVG Export Plug-In . SVG Version: 2.1.0 Build 146)  -->
 4 |   <defs>
 5 |     <style>
 6 |       .st0 {
 7 |         stroke-linecap: round;
 8 |         stroke-linejoin: round;
 9 |       }
10 | 
11 |       .st0, .st1 {
12 |         fill: none;
13 |         stroke: #1e54b7;
14 |       }
15 | 
16 |       .st1 {
17 |         isolation: isolate;
18 |         opacity: .1;
19 |         stroke-width: .5px;
20 |       }
21 | 
22 |       .st2 {
23 |         fill: #1e54b7;
24 |       }
25 |     </style>
26 |   </defs>
27 |   <ellipse class="st0" cx="95" cy="43" rx="15" ry="6">
28 |     <animate accumulate="none" additive="replace" attributeName="ry" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
29 |   </ellipse>
30 |   <ellipse class="st0" cx="95" cy="58" rx="15" ry="6">
31 |     <animate accumulate="none" additive="replace" attributeName="ry" begin="0.3s" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
32 |   </ellipse>
33 |   <ellipse class="st0" cx="95" cy="73" rx="15" ry="6">
34 |     <animate accumulate="none" additive="replace" attributeName="ry" begin="0.6s" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
35 |   </ellipse>
36 |   <line class="st0" x1="80" y1="43" x2="80" y2="73"/>
37 |   <line class="st0" x1="110" y1="43" x2="110" y2="73"/>
38 |   <ellipse class="st0" cx="155" cy="43" rx="15" ry="6">
39 |     <animate accumulate="none" additive="replace" attributeName="ry" begin="0.2s" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
40 |   </ellipse>
41 |   <ellipse class="st0" cx="155" cy="58" rx="15" ry="6">
42 |     <animate accumulate="none" additive="replace" attributeName="ry" begin="0.5s" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
43 |   </ellipse>
44 |   <ellipse class="st0" cx="155" cy="73" rx="15" ry="6">
45 |     <animate accumulate="none" additive="replace" attributeName="ry" begin="0.8s" calcMode="linear" dur="3s" fill="remove" repeatCount="indefinite" restart="always" values="6;5;6"/>
46 |   </ellipse>
47 |   <line class="st0" x1="140" y1="43" x2="140" y2="73"/>
48 |   <line class="st0" x1="170" y1="43" x2="170" y2="73"/>
49 |   <line class="st0" x1="95" y1="87.5" x2="95" y2="92.5"/>
50 |   <line class="st0" x1="95" y1="92.5" x2="155" y2="92.5"/>
51 |   <circle class="st2" cx="95" cy="85.9" r="1.6">
52 |     <animate accumulate="none" additive="replace" attributeName="r" calcMode="linear" dur="2s" fill="remove" repeatCount="indefinite" restart="always" values="1.571;2.5;1.571"/>
53 |   </circle>
54 |   <rect class="st2" x="122.7" y="90.2" width="4.6" height="4.6" transform="translate(-28.8 115.5) rotate(-45)"/>
55 |   <circle class="st2" cx="155" cy="85.8" r="1.6">
56 |     <animate accumulate="none" additive="replace" attributeName="r" calcMode="linear" dur="2s" fill="remove" repeatCount="indefinite" restart="always" values="1.571;2.5;1.571"/>
57 |   </circle>
58 |   <circle class="st1" cx="125" cy="56.2" r="31.2">
59 |     <animate accumulate="none" additive="replace" attributeName="r" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="35;45;35"/>
60 |     <animate accumulate="none" additive="replace" attributeName="opacity" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="0.1;0;0.1"/>
61 |   </circle>
62 |   <circle class="st1" cx="125" cy="56.2" r="24.1">
63 |     <animate accumulate="none" additive="replace" attributeName="r" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="25;35;25"/>
64 |     <animate accumulate="none" additive="replace" attributeName="opacity" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="0.1;0;0.1"/>
65 |   </circle>
66 |   <circle class="st1" cx="125" cy="56.2" r="17">
67 |     <animate accumulate="none" additive="replace" attributeName="r" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="15;25;15"/>
68 |     <animate accumulate="none" additive="replace" attributeName="opacity" calcMode="linear" dur="4s" fill="remove" repeatCount="indefinite" restart="always" values="0.1;0;0.1"/>
69 |   </circle>
70 |   <line class="st0" x1="155" y1="87.5" x2="155" y2="92.5"/>
71 | </svg>
```

--------------------------------------------------------------------------------
/docs/REFACTORING.md:
--------------------------------------------------------------------------------

```markdown
  1 | # MCP Server Refactoring Documentation
  2 | 
  3 | ## Overview
  4 | 
  5 | This document outlines the refactoring changes made to the MCP server to better support VS Code and Cursor extension integration. The refactoring focused on standardizing tool definitions, improving error handling, and adding editor-specific functionality.
  6 | 
  7 | ## Key Changes
  8 | 
  9 | ### 1. Enhanced Tool Structure
 10 | 
 11 | The `Tool` structure was extended to support:
 12 | 
 13 | - Context-aware execution with proper cancellation support
 14 | - Categorization of tools (e.g., "editor" category)
 15 | - Better schema validation
 16 | - Progress reporting during execution
 17 | 
 18 | ```go
 19 | // Before
 20 | type Tool struct {
 21 |     Name        string
 22 |     Description string
 23 |     InputSchema ToolInputSchema
 24 |     Handler     func(params map[string]interface{}) (interface{}, error)
 25 | }
 26 | 
 27 | // After
 28 | type Tool struct {
 29 |     Name        string
 30 |     Description string
 31 |     InputSchema ToolInputSchema
 32 |     Category    string // New field for grouping tools
 33 |     CreatedAt   time.Time // New field for tracking tool registration
 34 |     RawSchema   interface{} // Alternative schema representation
 35 |     Handler     func(ctx context.Context, params map[string]interface{}) (interface{}, error) // Context-aware
 36 | }
 37 | ```
 38 | 
 39 | ### 2. Dynamic Tool Registration
 40 | 
 41 | The tool registry was improved to support:
 42 | 
 43 | - Runtime tool registration and deregistration
 44 | - Tool categorization and filtering
 45 | - Input validation against schemas
 46 | - Timeouts and context handling
 47 | 
 48 | New methods added:
 49 | - `DeregisterTool`
 50 | - `GetToolsByCategory`
 51 | - `ExecuteToolWithTimeout`
 52 | - `ValidateToolInput`
 53 | 
 54 | ### 3. Editor Integration Support
 55 | 
 56 | Added support for editor-specific functionality:
 57 | 
 58 | - New editor context method (`editor/context`) for receiving editor state
 59 | - Session data storage for maintaining editor context
 60 | - Editor-specific tools (file info, code completion, code analysis)
 61 | - Category-based tool organization
 62 | 
 63 | ### 4. Improved Error Handling
 64 | 
 65 | Enhanced error handling with:
 66 | 
 67 | - Structured error responses for both protocol and tool execution errors
 68 | - New error types with clear error codes
 69 | - Proper error propagation from tools to clients
 70 | - Context-based cancellation and timeout handling
 71 | 
 72 | ### 5. Progress Reporting
 73 | 
 74 | Added support for reporting progress during tool execution:
 75 | 
 76 | - Progress token support in tool execution requests
 77 | - Notification channel for progress events
 78 | - Integration with the SSE transport for real-time updates
 79 | 
 80 | ### 6. Client Compatibility
 81 | 
 82 | Improved compatibility with VS Code and Cursor extensions:
 83 | 
 84 | - Added alias method `tools/execute` (alternative to `tools/call`)
 85 | - Standardized response format following MCP specification
 86 | - Properly formatted tool schemas matching client expectations
 87 | - Support for client-specific notification formats
 88 | 
 89 | ## Implementation Details
 90 | 
 91 | ### Tool Registration Flow
 92 | 
 93 | 1. Tools are defined with a name, description, input schema, and handler function
 94 | 2. Tools are registered with the tool registry during server initialization
 95 | 3. When a client connects, available tools are advertised through the `tools/list` method
 96 | 4. Clients can execute tools via the `tools/call` or `tools/execute` methods
 97 | 
 98 | ### Tool Execution Flow
 99 | 
100 | 1. Client sends a tool execution request with tool name and arguments
101 | 2. Server validates the arguments against the tool's input schema
102 | 3. If validation passes, the tool handler is executed with a context
103 | 4. Progress updates are sent during execution if requested
104 | 5. Results are formatted according to the MCP specification and returned to the client
105 | 
106 | ### Error Handling Flow
107 | 
108 | 1. If input validation fails, a structured error response is returned
109 | 2. If tool execution fails, the error is captured and returned in a format visible to LLMs
110 | 3. If the tool is not found or the request format is invalid, appropriate error codes are returned
111 | 
112 | ## Testing Strategy
113 | 
114 | 1. Test basic tool execution with the standard tools
115 | 2. Test editor-specific tools with mocked editor context
116 | 3. Test error handling with invalid inputs
117 | 4. Test progress reporting with long-running tools
118 | 5. Test timeouts with deliberately slow tools
119 | 
120 | ## Future Improvements
121 | 
122 | 1. Implement full JSON Schema validation for tool inputs
123 | 2. Add more editor-specific tools leveraging editor context
124 | 3. Implement persistent storage for tool results
125 | 4. Add authentication and authorization for tool execution
126 | 5. Implement streaming tool results for long-running operations 
```

--------------------------------------------------------------------------------
/pkg/dbtools/performance_test.go:
--------------------------------------------------------------------------------

```go
  1 | package dbtools
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"testing"
  6 | 	"time"
  7 | )
  8 | 
  9 | func TestPerformanceAnalyzer(t *testing.T) {
 10 | 	// Get the global performance analyzer and reset it to ensure clean state
 11 | 	analyzer := GetPerformanceAnalyzer()
 12 | 	analyzer.Reset()
 13 | 
 14 | 	// Ensure we restore previous state after test
 15 | 	defer func() {
 16 | 		analyzer.Reset()
 17 | 	}()
 18 | 
 19 | 	// Test tracking a query
 20 | 	ctx := context.Background()
 21 | 	result, err := analyzer.TrackQuery(ctx, "SELECT * FROM test_table", []interface{}{}, func() (interface{}, error) {
 22 | 		// Simulate query execution with sleep
 23 | 		time.Sleep(5 * time.Millisecond)
 24 | 		return "test result", nil
 25 | 	})
 26 | 
 27 | 	// Check results
 28 | 	if err != nil {
 29 | 		t.Errorf("Expected no error, got %v", err)
 30 | 	}
 31 | 
 32 | 	if result != "test result" {
 33 | 		t.Errorf("Expected result to be 'test result', got %v", result)
 34 | 	}
 35 | 
 36 | 	// Add a small delay to ensure async metrics update completes
 37 | 	time.Sleep(10 * time.Millisecond)
 38 | 
 39 | 	// Check metrics were collected
 40 | 	metrics := analyzer.GetAllMetrics()
 41 | 	if len(metrics) == 0 {
 42 | 		t.Error("Expected metrics to be collected, got none")
 43 | 	}
 44 | 
 45 | 	// Find the test query in metrics
 46 | 	var foundMetrics *QueryMetrics
 47 | 	for _, m := range metrics {
 48 | 		if normalizeQuery(m.Query) == normalizeQuery("SELECT * FROM test_table") {
 49 | 			foundMetrics = m
 50 | 			break
 51 | 		}
 52 | 	}
 53 | 
 54 | 	if foundMetrics == nil {
 55 | 		t.Error("Expected to find metrics for the test query, got none")
 56 | 	} else {
 57 | 		// Check metrics values
 58 | 		if foundMetrics.Count != 1 {
 59 | 			t.Errorf("Expected count to be 1, got %d", foundMetrics.Count)
 60 | 		}
 61 | 
 62 | 		if foundMetrics.AvgDuration < time.Millisecond {
 63 | 			t.Errorf("Expected average duration to be at least 1ms, got %v", foundMetrics.AvgDuration)
 64 | 		}
 65 | 	}
 66 | }
 67 | 
 68 | func TestQueryAnalyzer(t *testing.T) {
 69 | 	testCases := []struct {
 70 | 		name        string
 71 | 		query       string
 72 | 		expectation string
 73 | 	}{
 74 | 		{
 75 | 			name:        "SELECT * detection",
 76 | 			query:       "SELECT * FROM users",
 77 | 			expectation: "Avoid using SELECT * - specify only the columns you need",
 78 | 		},
 79 | 		{
 80 | 			name:        "Missing WHERE detection",
 81 | 			query:       "SELECT id, name FROM users",
 82 | 			expectation: "Consider adding a WHERE clause to limit the result set",
 83 | 		},
 84 | 		{
 85 | 			name:        "JOIN without ON detection",
 86 | 			query:       "SELECT u.id, p.name FROM users u JOIN profiles p",
 87 | 			expectation: "Ensure all JOINs have proper conditions",
 88 | 		},
 89 | 		{
 90 | 			name:        "ORDER BY detection",
 91 | 			query:       "SELECT id, name FROM users WHERE id > 100 ORDER BY name",
 92 | 			expectation: "Verify that ORDER BY columns are properly indexed",
 93 | 		},
 94 | 		{
 95 | 			name:        "Subquery detection",
 96 | 			query:       "SELECT id, name FROM users WHERE id IN (SELECT user_id FROM orders)",
 97 | 			expectation: "Consider replacing subqueries with JOINs where possible",
 98 | 		},
 99 | 	}
100 | 
101 | 	for _, tc := range testCases {
102 | 		t.Run(tc.name, func(t *testing.T) {
103 | 			suggestions := AnalyzeQuery(tc.query)
104 | 
105 | 			// Check if the expected suggestion is in the list
106 | 			found := false
107 | 			for _, s := range suggestions {
108 | 				if s == tc.expectation {
109 | 					found = true
110 | 					break
111 | 				}
112 | 			}
113 | 
114 | 			if !found {
115 | 				t.Errorf("Expected to find suggestion '%s' for query '%s', but got suggestions: %v",
116 | 					tc.expectation, tc.query, suggestions)
117 | 			}
118 | 		})
119 | 	}
120 | }
121 | 
122 | func TestNormalizeQuery(t *testing.T) {
123 | 	testCases := []struct {
124 | 		name     string
125 | 		input    string
126 | 		expected string
127 | 	}{
128 | 		{
129 | 			name:     "Number replacement",
130 | 			input:    "SELECT * FROM users WHERE id = 123",
131 | 			expected: "SELECT * FROM users WHERE id = ?",
132 | 		},
133 | 		{
134 | 			name:     "String replacement",
135 | 			input:    "SELECT * FROM users WHERE name = 'John Doe'",
136 | 			expected: "SELECT * FROM users WHERE name = '?'",
137 | 		},
138 | 		{
139 | 			name:     "Double quotes replacement",
140 | 			input:    "SELECT * FROM \"users\" WHERE \"name\" = \"John Doe\"",
141 | 			expected: "SELECT * FROM \"?\" WHERE \"?\" = \"?\"",
142 | 		},
143 | 		{
144 | 			name:     "Multiple whitespace",
145 | 			input:    "SELECT   *   FROM   users",
146 | 			expected: "SELECT * FROM users",
147 | 		},
148 | 		{
149 | 			name:     "Complex query",
150 | 			input:    "SELECT u.id, p.name FROM users u JOIN profiles p ON u.id = 123 AND p.name = 'test'",
151 | 			expected: "SELECT u.id, p.name FROM users u JOIN profiles p ON u.id = ? AND p.name = '?'",
152 | 		},
153 | 	}
154 | 
155 | 	for _, tc := range testCases {
156 | 		t.Run(tc.name, func(t *testing.T) {
157 | 			result := normalizeQuery(tc.input)
158 | 			if result != tc.expected {
159 | 				t.Errorf("Expected normalized query '%s', got '%s'", tc.expected, result)
160 | 			}
161 | 		})
162 | 	}
163 | }
164 | 
```

--------------------------------------------------------------------------------
/internal/logger/logger_test.go:
--------------------------------------------------------------------------------

```go
  1 | package logger
  2 | 
  3 | import (
  4 | 	"bytes"
  5 | 	"errors"
  6 | 	"testing"
  7 | 
  8 | 	"github.com/stretchr/testify/assert"
  9 | 	"go.uber.org/zap"
 10 | 	"go.uber.org/zap/zapcore"
 11 | 	"go.uber.org/zap/zaptest"
 12 | )
 13 | 
 14 | // captureOutput captures log output during a test
 15 | func captureOutput(f func()) string {
 16 | 	var buf bytes.Buffer
 17 | 
 18 | 	// Create a custom zap logger that writes to our buffer
 19 | 	encoderConfig := zap.NewDevelopmentEncoderConfig()
 20 | 	encoder := zapcore.NewConsoleEncoder(encoderConfig)
 21 | 	core := zapcore.NewCore(encoder, zapcore.AddSync(&buf), zapcore.DebugLevel)
 22 | 
 23 | 	oldLogger := zapLogger
 24 | 	zapLogger = zap.New(core)
 25 | 	defer func() { zapLogger = oldLogger }()
 26 | 
 27 | 	f()
 28 | 	return buf.String()
 29 | }
 30 | 
 31 | func TestSetLogLevel(t *testing.T) {
 32 | 	tests := []struct {
 33 | 		level    string
 34 | 		expected Level
 35 | 	}{
 36 | 		{"debug", LevelDebug},
 37 | 		{"DEBUG", LevelDebug},
 38 | 		{"info", LevelInfo},
 39 | 		{"INFO", LevelInfo},
 40 | 		{"warn", LevelWarn},
 41 | 		{"WARN", LevelWarn},
 42 | 		{"error", LevelError},
 43 | 		{"ERROR", LevelError},
 44 | 		{"unknown", LevelInfo}, // Default
 45 | 	}
 46 | 
 47 | 	for _, tt := range tests {
 48 | 		t.Run(tt.level, func(t *testing.T) {
 49 | 			setLogLevel(tt.level)
 50 | 			assert.Equal(t, tt.expected, logLevel)
 51 | 		})
 52 | 	}
 53 | }
 54 | 
 55 | func TestZapLevelConversion(t *testing.T) {
 56 | 	tests := []struct {
 57 | 		level         Level
 58 | 		expectedLevel zapcore.Level
 59 | 	}{
 60 | 		{LevelDebug, zapcore.DebugLevel},
 61 | 		{LevelInfo, zapcore.InfoLevel},
 62 | 		{LevelWarn, zapcore.WarnLevel},
 63 | 		{LevelError, zapcore.ErrorLevel},
 64 | 	}
 65 | 
 66 | 	for _, tt := range tests {
 67 | 		t.Run(zapcore.Level(tt.expectedLevel).String(), func(t *testing.T) {
 68 | 			atomicLevel := getZapLevel(tt.level)
 69 | 			assert.Equal(t, tt.expectedLevel, atomicLevel.Level())
 70 | 		})
 71 | 	}
 72 | }
 73 | 
 74 | func TestDebug(t *testing.T) {
 75 | 	// Setup test logger
 76 | 	zapLogger = zaptest.NewLogger(t)
 77 | 
 78 | 	// Test when debug is enabled
 79 | 	logLevel = LevelDebug
 80 | 	output := captureOutput(func() {
 81 | 		Debug("Test debug message: %s", "value")
 82 | 	})
 83 | 	assert.Contains(t, output, "DEBUG")
 84 | 	assert.Contains(t, output, "Test debug message: value")
 85 | 
 86 | 	// Test when debug is disabled
 87 | 	logLevel = LevelInfo
 88 | 	output = captureOutput(func() {
 89 | 		Debug("This should not appear")
 90 | 	})
 91 | 	assert.Empty(t, output)
 92 | }
 93 | 
 94 | func TestInfo(t *testing.T) {
 95 | 	// Setup test logger
 96 | 	zapLogger = zaptest.NewLogger(t)
 97 | 
 98 | 	// Test when info is enabled
 99 | 	logLevel = LevelInfo
100 | 	output := captureOutput(func() {
101 | 		Info("Test info message: %s", "value")
102 | 	})
103 | 	assert.Contains(t, output, "INFO")
104 | 	assert.Contains(t, output, "Test info message: value")
105 | 
106 | 	// Test when info is disabled
107 | 	logLevel = LevelError
108 | 	output = captureOutput(func() {
109 | 		Info("This should not appear")
110 | 	})
111 | 	assert.Empty(t, output)
112 | }
113 | 
114 | func TestWarn(t *testing.T) {
115 | 	// Setup test logger
116 | 	zapLogger = zaptest.NewLogger(t)
117 | 
118 | 	// Test when warn is enabled
119 | 	logLevel = LevelWarn
120 | 	output := captureOutput(func() {
121 | 		Warn("Test warn message: %s", "value")
122 | 	})
123 | 	assert.Contains(t, output, "WARN")
124 | 	assert.Contains(t, output, "Test warn message: value")
125 | 
126 | 	// Test when warn is disabled
127 | 	logLevel = LevelError
128 | 	output = captureOutput(func() {
129 | 		Warn("This should not appear")
130 | 	})
131 | 	assert.Empty(t, output)
132 | }
133 | 
134 | func TestError(t *testing.T) {
135 | 	// Setup test logger
136 | 	zapLogger = zaptest.NewLogger(t)
137 | 
138 | 	// Error should always be logged when level is error
139 | 	logLevel = LevelError
140 | 	output := captureOutput(func() {
141 | 		Error("Test error message: %s", "value")
142 | 	})
143 | 	assert.Contains(t, output, "ERROR")
144 | 	assert.Contains(t, output, "Test error message: value")
145 | }
146 | 
147 | func TestErrorWithStack(t *testing.T) {
148 | 	// Setup test logger
149 | 	zapLogger = zaptest.NewLogger(t)
150 | 
151 | 	err := errors.New("test error")
152 | 	output := captureOutput(func() {
153 | 		ErrorWithStack(err)
154 | 	})
155 | 	assert.Contains(t, output, "ERROR")
156 | 	assert.Contains(t, output, "test error")
157 | 	assert.Contains(t, output, "stack")
158 | }
159 | 
160 | // For the structured logging tests, we'll just test that the functions don't panic
161 | 
162 | func TestRequestLog(t *testing.T) {
163 | 	zapLogger = zaptest.NewLogger(t)
164 | 	logLevel = LevelDebug
165 | 	assert.NotPanics(t, func() {
166 | 		RequestLog("POST", "/api/data", "session123", `{"key":"value"}`)
167 | 	})
168 | }
169 | 
170 | func TestResponseLog(t *testing.T) {
171 | 	zapLogger = zaptest.NewLogger(t)
172 | 	logLevel = LevelDebug
173 | 	assert.NotPanics(t, func() {
174 | 		ResponseLog(200, "session123", `{"result":"success"}`)
175 | 	})
176 | }
177 | 
178 | func TestSSEEventLog(t *testing.T) {
179 | 	zapLogger = zaptest.NewLogger(t)
180 | 	logLevel = LevelDebug
181 | 	assert.NotPanics(t, func() {
182 | 		SSEEventLog("message", "session123", `{"data":"content"}`)
183 | 	})
184 | }
185 | 
186 | func TestRequestResponseLog(t *testing.T) {
187 | 	zapLogger = zaptest.NewLogger(t)
188 | 	logLevel = LevelDebug
189 | 	assert.NotPanics(t, func() {
190 | 		RequestResponseLog("RPC", "session123", `{"method":"getData"}`, `{"result":"data"}`)
191 | 	})
192 | }
193 | 
```

--------------------------------------------------------------------------------
/examples/postgres_connection.go:
--------------------------------------------------------------------------------

```go
  1 | package main
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"fmt"
  6 | 	"log"
  7 | 	"os"
  8 | 	"time"
  9 | 
 10 | 	"github.com/FreePeak/db-mcp-server/pkg/db"
 11 | )
 12 | 
 13 | func main() {
 14 | 	// Example 1: Direct PostgreSQL 17 connection
 15 | 	connectDirectly()
 16 | 
 17 | 	// Example 2: Using the DB Manager with configuration file
 18 | 	connectWithManager()
 19 | }
 20 | 
 21 | func connectDirectly() {
 22 | 	fmt.Println("=== Example 1: Direct PostgreSQL 17 Connection ===")
 23 | 
 24 | 	// Create configuration for PostgreSQL 17
 25 | 	config := db.Config{
 26 | 		Type:     "postgres",
 27 | 		Host:     getEnv("POSTGRES_HOST", "localhost"),
 28 | 		Port:     5432,
 29 | 		User:     getEnv("POSTGRES_USER", "postgres"),
 30 | 		Password: getEnv("POSTGRES_PASSWORD", "postgres"),
 31 | 		Name:     getEnv("POSTGRES_DB", "postgres"),
 32 | 
 33 | 		// PostgreSQL 17 specific options
 34 | 		SSLMode:            db.SSLPrefer,
 35 | 		ApplicationName:    "db-mcp-example",
 36 | 		ConnectTimeout:     10,
 37 | 		TargetSessionAttrs: "any", // Works with PostgreSQL 10+
 38 | 
 39 | 		// Additional options
 40 | 		Options: map[string]string{
 41 | 			"client_encoding": "UTF8",
 42 | 		},
 43 | 
 44 | 		// Connection pool settings
 45 | 		MaxOpenConns:    10,
 46 | 		MaxIdleConns:    5,
 47 | 		ConnMaxLifetime: 5 * time.Minute,
 48 | 		ConnMaxIdleTime: 5 * time.Minute,
 49 | 	}
 50 | 
 51 | 	// Create database connection
 52 | 	database, err := db.NewDatabase(config)
 53 | 	if err != nil {
 54 | 		log.Fatalf("Failed to create database instance: %v", err)
 55 | 	}
 56 | 
 57 | 	// Connect to the database
 58 | 	fmt.Println("Connecting to PostgreSQL...")
 59 | 	if err := database.Connect(); err != nil {
 60 | 		log.Fatalf("Failed to connect to database: %v", err)
 61 | 	}
 62 | 	defer database.Close()
 63 | 
 64 | 	fmt.Println("Successfully connected to PostgreSQL")
 65 | 	fmt.Println("Connection string (masked): ", database.ConnectionString())
 66 | 
 67 | 	// Query PostgreSQL version to verify compatibility
 68 | 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 69 | 	defer cancel()
 70 | 
 71 | 	var version string
 72 | 	err = database.QueryRow(ctx, "SELECT version()").Scan(&version)
 73 | 	if err != nil {
 74 | 		log.Fatalf("Failed to query PostgreSQL version: %v", err)
 75 | 	}
 76 | 
 77 | 	fmt.Printf("Connected to: %s\n", version)
 78 | 
 79 | 	// Run a sample query with PostgreSQL-style placeholders
 80 | 	rows, err := database.Query(ctx, "SELECT datname FROM pg_database WHERE datistemplate = $1", false)
 81 | 	if err != nil {
 82 | 		log.Fatalf("Query failed: %v", err)
 83 | 	}
 84 | 	defer rows.Close()
 85 | 
 86 | 	fmt.Println("\nAvailable databases:")
 87 | 	for rows.Next() {
 88 | 		var dbName string
 89 | 		if err := rows.Scan(&dbName); err != nil {
 90 | 			log.Printf("Failed to scan row: %v", err)
 91 | 			continue
 92 | 		}
 93 | 		fmt.Printf("- %s\n", dbName)
 94 | 	}
 95 | 
 96 | 	if err = rows.Err(); err != nil {
 97 | 		log.Printf("Error during row iteration: %v", err)
 98 | 	}
 99 | 
100 | 	fmt.Println()
101 | }
102 | 
103 | func connectWithManager() {
104 | 	fmt.Println("=== Example 2: Using DB Manager with Configuration ===")
105 | 
106 | 	// Create a database manager
107 | 	manager := db.NewDBManager()
108 | 
109 | 	// Create sample configuration with PostgreSQL 17 settings
110 | 	config := []byte(`{
111 | 		"connections": [
112 | 			{
113 | 				"id": "postgres17",
114 | 				"type": "postgres",
115 | 				"host": "localhost", 
116 | 				"port": 5432,
117 | 				"name": "postgres",
118 | 				"user": "postgres",
119 | 				"password": "postgres",
120 | 				"ssl_mode": "prefer",
121 | 				"application_name": "db-mcp-example",
122 | 				"connect_timeout": 10,
123 | 				"target_session_attrs": "any",
124 | 				"options": {
125 | 					"client_encoding": "UTF8"
126 | 				},
127 | 				"max_open_conns": 10,
128 | 				"max_idle_conns": 5,
129 | 				"conn_max_lifetime_seconds": 300,
130 | 				"conn_max_idle_time_seconds": 60
131 | 			}
132 | 		]
133 | 	}`)
134 | 
135 | 	// Update with environment variables
136 | 	// In a real application, you would load this from a file
137 | 	// and use proper environment variable substitution
138 | 
139 | 	// Load configuration
140 | 	if err := manager.LoadConfig(config); err != nil {
141 | 		log.Fatalf("Failed to load database config: %v", err)
142 | 	}
143 | 
144 | 	// Connect to databases
145 | 	fmt.Println("Connecting to all configured databases...")
146 | 	if err := manager.Connect(); err != nil {
147 | 		log.Fatalf("Failed to connect to databases: %v", err)
148 | 	}
149 | 	defer manager.CloseAll()
150 | 
151 | 	// Get a specific database connection
152 | 	database, err := manager.GetDatabase("postgres17")
153 | 	if err != nil {
154 | 		log.Fatalf("Failed to get database: %v", err)
155 | 	}
156 | 
157 | 	fmt.Println("Successfully connected to PostgreSQL via manager")
158 | 	fmt.Println("Connection string (masked): ", database.ConnectionString())
159 | 
160 | 	// Query PostgreSQL version to verify compatibility
161 | 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
162 | 	defer cancel()
163 | 
164 | 	var version string
165 | 	err = database.QueryRow(ctx, "SELECT version()").Scan(&version)
166 | 	if err != nil {
167 | 		log.Fatalf("Failed to query PostgreSQL version: %v", err)
168 | 	}
169 | 
170 | 	fmt.Printf("Connected to: %s\n", version)
171 | 	fmt.Println()
172 | }
173 | 
// getEnv returns the value of the environment variable key, or fallback when
// the variable is not set. An empty-but-set variable is returned as-is.
func getEnv(key, fallback string) string {
	value, ok := os.LookupEnv(key)
	if !ok {
		return fallback
	}
	return value
}
181 | 
```
Page 1/7FirstPrevNextLast