This is page 5 of 7. Use http://codebase.md/freepeak/db-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cm
│   └── gitstream.cm
├── .cursor
│   ├── mcp-example.json
│   ├── mcp.json
│   └── rules
│       └── global.mdc
├── .dockerignore
├── .DS_Store
├── .env.example
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── go.yml
├── .gitignore
├── .golangci.yml
├── assets
│   └── logo.svg
├── CHANGELOG.md
├── cmd
│   └── server
│       └── main.go
├── commit-message.txt
├── config.json
├── config.timescaledb-test.json
├── docker-compose.mcp-test.yml
├── docker-compose.test.yml
├── docker-compose.timescaledb-test.yml
├── docker-compose.yml
├── docker-wrapper.sh
├── Dockerfile
├── docs
│   ├── REFACTORING.md
│   ├── TIMESCALEDB_FUNCTIONS.md
│   ├── TIMESCALEDB_IMPLEMENTATION.md
│   ├── TIMESCALEDB_PRD.md
│   └── TIMESCALEDB_TOOLS.md
├── examples
│   └── postgres_connection.go
├── glama.json
├── go.mod
├── go.sum
├── init-scripts
│   └── timescaledb
│       ├── 01-init.sql
│       ├── 02-sample-data.sql
│       ├── 03-continuous-aggregates.sql
│       └── README.md
├── internal
│   ├── config
│   │   ├── config_test.go
│   │   └── config.go
│   ├── delivery
│   │   └── mcp
│   │       ├── compression_policy_test.go
│   │       ├── context
│   │       │   ├── hypertable_schema_test.go
│   │       │   ├── timescale_completion_test.go
│   │       │   ├── timescale_context_test.go
│   │       │   └── timescale_query_suggestion_test.go
│   │       ├── mock_test.go
│   │       ├── response_test.go
│   │       ├── response.go
│   │       ├── retention_policy_test.go
│   │       ├── server_wrapper.go
│   │       ├── timescale_completion.go
│   │       ├── timescale_context.go
│   │       ├── timescale_schema.go
│   │       ├── timescale_tool_test.go
│   │       ├── timescale_tool.go
│   │       ├── timescale_tools_test.go
│   │       ├── tool_registry.go
│   │       └── tool_types.go
│   ├── domain
│   │   └── database.go
│   ├── logger
│   │   ├── logger_test.go
│   │   └── logger.go
│   ├── repository
│   │   └── database_repository.go
│   └── usecase
│       └── database_usecase.go
├── LICENSE
├── Makefile
├── pkg
│   ├── core
│   │   ├── core.go
│   │   └── logging.go
│   ├── db
│   │   ├── db_test.go
│   │   ├── db.go
│   │   ├── manager.go
│   │   ├── README.md
│   │   └── timescale
│   │       ├── config_test.go
│   │       ├── config.go
│   │       ├── connection_test.go
│   │       ├── connection.go
│   │       ├── continuous_aggregate_test.go
│   │       ├── continuous_aggregate.go
│   │       ├── hypertable_test.go
│   │       ├── hypertable.go
│   │       ├── metadata.go
│   │       ├── mocks_test.go
│   │       ├── policy_test.go
│   │       ├── policy.go
│   │       ├── query.go
│   │       ├── timeseries_test.go
│   │       └── timeseries.go
│   ├── dbtools
│   │   ├── db_helpers.go
│   │   ├── dbtools_test.go
│   │   ├── dbtools.go
│   │   ├── exec.go
│   │   ├── performance_test.go
│   │   ├── performance.go
│   │   ├── query.go
│   │   ├── querybuilder_test.go
│   │   ├── querybuilder.go
│   │   ├── README.md
│   │   ├── schema_test.go
│   │   ├── schema.go
│   │   ├── tx_test.go
│   │   └── tx.go
│   ├── internal
│   │   └── logger
│   │       └── logger.go
│   ├── jsonrpc
│   │   └── jsonrpc.go
│   ├── logger
│   │   └── logger.go
│   └── tools
│       └── tools.go
├── README-old.md
├── README.md
├── repomix-output.txt
├── request.json
├── start-mcp.sh
├── test.Dockerfile
├── timescaledb-test.sh
└── wait-for-it.sh
```

# Files

--------------------------------------------------------------------------------
/pkg/dbtools/querybuilder.go:
--------------------------------------------------------------------------------

```go
  1 | package dbtools
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"fmt"
  6 | 	"regexp"
  7 | 	"strconv"
  8 | 	"strings"
  9 | 	"time"
 10 | 
 11 | 	"github.com/FreePeak/db-mcp-server/pkg/db"
 12 | 	"github.com/FreePeak/db-mcp-server/pkg/logger"
 13 | 	"github.com/FreePeak/db-mcp-server/pkg/tools"
 14 | )
 15 | 
 16 | // QueryComponents represents the components of a SQL query
 17 | type QueryComponents struct {
 18 | 	Select  []string     `json:"select"`
 19 | 	From    string       `json:"from"`
 20 | 	Joins   []JoinClause `json:"joins"`
 21 | 	Where   []Condition  `json:"where"`
 22 | 	GroupBy []string     `json:"groupBy"`
 23 | 	Having  []string     `json:"having"`
 24 | 	OrderBy []OrderBy    `json:"orderBy"`
 25 | 	Limit   int          `json:"limit"`
 26 | 	Offset  int          `json:"offset"`
 27 | }
 28 | 
 29 | // JoinClause represents a SQL JOIN clause
 30 | type JoinClause struct {
 31 | 	Type  string `json:"type"`
 32 | 	Table string `json:"table"`
 33 | 	On    string `json:"on"`
 34 | }
 35 | 
 36 | // Condition represents a WHERE condition
 37 | type Condition struct {
 38 | 	Column    string `json:"column"`
 39 | 	Operator  string `json:"operator"`
 40 | 	Value     string `json:"value"`
 41 | 	Connector string `json:"connector"`
 42 | }
 43 | 
 44 | // OrderBy represents an ORDER BY clause
 45 | type OrderBy struct {
 46 | 	Column    string `json:"column"`
 47 | 	Direction string `json:"direction"`
 48 | }
 49 | 
 50 | // createQueryBuilderTool creates a tool for building and validating SQL queries
 51 | func createQueryBuilderTool() *tools.Tool {
 52 | 	return &tools.Tool{
 53 | 		Name:        "dbQueryBuilder",
 54 | 		Description: "Visual SQL query construction with syntax validation",
 55 | 		Category:    "database",
 56 | 		InputSchema: tools.ToolInputSchema{
 57 | 			Type: "object",
 58 | 			Properties: map[string]interface{}{
 59 | 				"action": map[string]interface{}{
 60 | 					"type":        "string",
 61 | 					"description": "Action to perform (validate, build, analyze)",
 62 | 					"enum":        []string{"validate", "build", "analyze"},
 63 | 				},
 64 | 				"query": map[string]interface{}{
 65 | 					"type":        "string",
 66 | 					"description": "SQL query to validate or analyze",
 67 | 				},
 68 | 				"components": map[string]interface{}{
 69 | 					"type":        "object",
 70 | 					"description": "Query components for building a query",
 71 | 					"properties": map[string]interface{}{
 72 | 						"select": map[string]interface{}{
 73 | 							"type":        "array",
 74 | 							"description": "Columns to select",
 75 | 							"items": map[string]interface{}{
 76 | 								"type": "string",
 77 | 							},
 78 | 						},
 79 | 						"from": map[string]interface{}{
 80 | 							"type":        "string",
 81 | 							"description": "Table to select from",
 82 | 						},
 83 | 						"joins": map[string]interface{}{
 84 | 							"type":        "array",
 85 | 							"description": "Joins to include",
 86 | 							"items": map[string]interface{}{
 87 | 								"type": "object",
 88 | 								"properties": map[string]interface{}{
 89 | 									"type": map[string]interface{}{
 90 | 										"type": "string",
 91 | 										"enum": []string{"inner", "left", "right", "full"},
 92 | 									},
 93 | 									"table": map[string]interface{}{
 94 | 										"type": "string",
 95 | 									},
 96 | 									"on": map[string]interface{}{
 97 | 										"type": "string",
 98 | 									},
 99 | 								},
100 | 							},
101 | 						},
102 | 						"where": map[string]interface{}{
103 | 							"type":        "array",
104 | 							"description": "Where conditions",
105 | 							"items": map[string]interface{}{
106 | 								"type": "object",
107 | 								"properties": map[string]interface{}{
108 | 									"column": map[string]interface{}{
109 | 										"type": "string",
110 | 									},
111 | 									"operator": map[string]interface{}{
112 | 										"type": "string",
113 | 										"enum": []string{"=", "!=", "<", ">", "<=", ">=", "LIKE", "IN", "NOT IN", "IS NULL", "IS NOT NULL"},
114 | 									},
115 | 									"value": map[string]interface{}{
116 | 										"type": "string",
117 | 									},
118 | 									"connector": map[string]interface{}{
119 | 										"type": "string",
120 | 										"enum": []string{"AND", "OR"},
121 | 									},
122 | 								},
123 | 							},
124 | 						},
125 | 						"groupBy": map[string]interface{}{
126 | 							"type":        "array",
127 | 							"description": "Columns to group by",
128 | 							"items": map[string]interface{}{
129 | 								"type": "string",
130 | 							},
131 | 						},
132 | 						"having": map[string]interface{}{
133 | 							"type":        "array",
134 | 							"description": "Having conditions",
135 | 							"items": map[string]interface{}{
136 | 								"type": "string",
137 | 							},
138 | 						},
139 | 						"orderBy": map[string]interface{}{
140 | 							"type":        "array",
141 | 							"description": "Columns to order by",
142 | 							"items": map[string]interface{}{
143 | 								"type": "object",
144 | 								"properties": map[string]interface{}{
145 | 									"column": map[string]interface{}{
146 | 										"type": "string",
147 | 									},
148 | 									"direction": map[string]interface{}{
149 | 										"type": "string",
150 | 										"enum": []string{"ASC", "DESC"},
151 | 									},
152 | 								},
153 | 							},
154 | 						},
155 | 						"limit": map[string]interface{}{
156 | 							"type":        "integer",
157 | 							"description": "Limit results",
158 | 						},
159 | 						"offset": map[string]interface{}{
160 | 							"type":        "integer",
161 | 							"description": "Offset results",
162 | 						},
163 | 					},
164 | 				},
165 | 				"timeout": map[string]interface{}{
166 | 					"type":        "integer",
167 | 					"description": "Execution timeout in milliseconds (default: 5000)",
168 | 				},
169 | 				"database": map[string]interface{}{
170 | 					"type":        "string",
171 | 					"description": "Database ID to use (required; must match an ID in your configuration)",
172 | 				},
173 | 			},
174 | 			Required: []string{"action", "database"},
175 | 		},
176 | 		Handler: handleQueryBuilder,
177 | 	}
178 | }
179 | 
180 | // handleQueryBuilder handles the query builder tool execution
181 | func handleQueryBuilder(ctx context.Context, params map[string]interface{}) (interface{}, error) {
182 | 	// Check if database manager is initialized
183 | 	if dbManager == nil {
184 | 		return nil, fmt.Errorf("database manager not initialized")
185 | 	}
186 | 
187 | 	// Extract parameters
188 | 	action, ok := getStringParam(params, "action")
189 | 	if !ok {
190 | 		return nil, fmt.Errorf("action parameter is required")
191 | 	}
192 | 
193 | 	// Get database ID
194 | 	databaseID, ok := getStringParam(params, "database")
195 | 	if !ok {
196 | 		return nil, fmt.Errorf("database parameter is required")
197 | 	}
198 | 
199 | 	// Get database instance
200 | 	db, err := dbManager.GetDatabase(databaseID)
201 | 	if err != nil {
202 | 		return nil, fmt.Errorf("failed to get database: %w", err)
203 | 	}
204 | 
205 | 	// Extract query parameter
206 | 	query, _ := getStringParam(params, "query")
207 | 
208 | 	// Extract components parameter
209 | 	var components QueryComponents
210 | 	if componentsMap, ok := params["components"].(map[string]interface{}); ok {
211 | 		// Parse components from map
212 | 		if err := parseQueryComponents(&components, componentsMap); err != nil {
213 | 			return nil, fmt.Errorf("failed to parse query components: %w", err)
214 | 		}
215 | 	}
216 | 
217 | 	// Create context with timeout
218 | 	dbTimeout := db.QueryTimeout() * 1000 // Convert from seconds to milliseconds
219 | 	timeout := dbTimeout                  // Default to the database's query timeout
220 | 	if timeoutParam, ok := getIntParam(params, "timeout"); ok {
221 | 		timeout = timeoutParam
222 | 	}
223 | 
224 | 	timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)
225 | 	defer cancel()
226 | 
227 | 	// Execute requested action
228 | 	switch action {
229 | 	case "validate":
230 | 		if query == "" {
231 | 			return nil, fmt.Errorf("query parameter is required for validate action")
232 | 		}
233 | 		return validateQuery(timeoutCtx, db, query)
234 | 	case "build":
235 | 		if err := validateQueryComponents(&components); err != nil {
236 | 			return nil, fmt.Errorf("invalid query components: %w", err)
237 | 		}
238 | 		builtQuery, err := buildQueryFromComponents(&components)
239 | 		if err != nil {
240 | 			return nil, fmt.Errorf("failed to build query: %w", err)
241 | 		}
242 | 		return validateQuery(timeoutCtx, db, builtQuery)
243 | 	case "analyze":
244 | 		if query == "" {
245 | 			return nil, fmt.Errorf("query parameter is required for analyze action")
246 | 		}
247 | 		return analyzeQueryPlan(timeoutCtx, db, query)
248 | 	default:
249 | 		return nil, fmt.Errorf("invalid action: %s", action)
250 | 	}
251 | }
252 | 
253 | // parseQueryComponents parses query components from a map
254 | func parseQueryComponents(components *QueryComponents, data map[string]interface{}) error {
255 | 	// Parse SELECT columns
256 | 	if selectArr, ok := data["select"].([]interface{}); ok {
257 | 		components.Select = make([]string, len(selectArr))
258 | 		for i, col := range selectArr {
259 | 			if str, ok := col.(string); ok {
260 | 				components.Select[i] = str
261 | 			}
262 | 		}
263 | 	}
264 | 
265 | 	// Parse FROM table
266 | 	if from, ok := data["from"].(string); ok {
267 | 		components.From = from
268 | 	}
269 | 
270 | 	// Parse JOINs
271 | 	if joinsArr, ok := data["joins"].([]interface{}); ok {
272 | 		components.Joins = make([]JoinClause, len(joinsArr))
273 | 		for i, join := range joinsArr {
274 | 			if joinMap, ok := join.(map[string]interface{}); ok {
275 | 				if joinType, ok := joinMap["type"].(string); ok {
276 | 					components.Joins[i].Type = joinType
277 | 				}
278 | 				if table, ok := joinMap["table"].(string); ok {
279 | 					components.Joins[i].Table = table
280 | 				}
281 | 				if on, ok := joinMap["on"].(string); ok {
282 | 					components.Joins[i].On = on
283 | 				}
284 | 			}
285 | 		}
286 | 	}
287 | 
288 | 	// Parse WHERE conditions
289 | 	if whereArr, ok := data["where"].([]interface{}); ok {
290 | 		components.Where = make([]Condition, len(whereArr))
291 | 		for i, cond := range whereArr {
292 | 			if condMap, ok := cond.(map[string]interface{}); ok {
293 | 				if col, ok := condMap["column"].(string); ok {
294 | 					components.Where[i].Column = col
295 | 				}
296 | 				if op, ok := condMap["operator"].(string); ok {
297 | 					components.Where[i].Operator = op
298 | 				}
299 | 				if val, ok := condMap["value"].(string); ok {
300 | 					components.Where[i].Value = val
301 | 				}
302 | 				if conn, ok := condMap["connector"].(string); ok {
303 | 					components.Where[i].Connector = conn
304 | 				}
305 | 			}
306 | 		}
307 | 	}
308 | 
309 | 	// Parse GROUP BY columns
310 | 	if groupByArr, ok := data["groupBy"].([]interface{}); ok {
311 | 		components.GroupBy = make([]string, len(groupByArr))
312 | 		for i, col := range groupByArr {
313 | 			if str, ok := col.(string); ok {
314 | 				components.GroupBy[i] = str
315 | 			}
316 | 		}
317 | 	}
318 | 
319 | 	// Parse HAVING conditions
320 | 	if havingArr, ok := data["having"].([]interface{}); ok {
321 | 		components.Having = make([]string, len(havingArr))
322 | 		for i, cond := range havingArr {
323 | 			if str, ok := cond.(string); ok {
324 | 				components.Having[i] = str
325 | 			}
326 | 		}
327 | 	}
328 | 
329 | 	// Parse ORDER BY clauses
330 | 	if orderByArr, ok := data["orderBy"].([]interface{}); ok {
331 | 		components.OrderBy = make([]OrderBy, len(orderByArr))
332 | 		for i, order := range orderByArr {
333 | 			if orderMap, ok := order.(map[string]interface{}); ok {
334 | 				if col, ok := orderMap["column"].(string); ok {
335 | 					components.OrderBy[i].Column = col
336 | 				}
337 | 				if dir, ok := orderMap["direction"].(string); ok {
338 | 					components.OrderBy[i].Direction = dir
339 | 				}
340 | 			}
341 | 		}
342 | 	}
343 | 
344 | 	// Parse LIMIT
345 | 	if limit, ok := data["limit"].(float64); ok {
346 | 		components.Limit = int(limit)
347 | 	}
348 | 
349 | 	// Parse OFFSET
350 | 	if offset, ok := data["offset"].(float64); ok {
351 | 		components.Offset = int(offset)
352 | 	}
353 | 
354 | 	return nil
355 | }
356 | 
357 | // validateQueryComponents validates query components
358 | func validateQueryComponents(components *QueryComponents) error {
359 | 	if components.From == "" {
360 | 		return fmt.Errorf("FROM clause is required")
361 | 	}
362 | 
363 | 	if len(components.Select) == 0 {
364 | 		return fmt.Errorf("SELECT clause must have at least one column")
365 | 	}
366 | 
367 | 	for _, join := range components.Joins {
368 | 		if join.Table == "" {
369 | 			return fmt.Errorf("JOIN clause must have a table")
370 | 		}
371 | 		if join.On == "" {
372 | 			return fmt.Errorf("JOIN clause must have an ON condition")
373 | 		}
374 | 	}
375 | 
376 | 	for _, where := range components.Where {
377 | 		if where.Column == "" {
378 | 			return fmt.Errorf("WHERE condition must have a column")
379 | 		}
380 | 		if where.Operator == "" {
381 | 			return fmt.Errorf("WHERE condition must have an operator")
382 | 		}
383 | 	}
384 | 
385 | 	for _, order := range components.OrderBy {
386 | 		if order.Column == "" {
387 | 			return fmt.Errorf("ORDER BY clause must have a column")
388 | 		}
389 | 		if order.Direction != "ASC" && order.Direction != "DESC" {
390 | 			return fmt.Errorf("ORDER BY direction must be ASC or DESC")
391 | 		}
392 | 	}
393 | 
394 | 	return nil
395 | }
396 | 
397 | // buildQueryFromComponents builds a SQL query from components
398 | func buildQueryFromComponents(components *QueryComponents) (string, error) {
399 | 	var query strings.Builder
400 | 
401 | 	// Build SELECT clause
402 | 	query.WriteString("SELECT ")
403 | 	query.WriteString(strings.Join(components.Select, ", "))
404 | 
405 | 	// Build FROM clause
406 | 	query.WriteString(" FROM ")
407 | 	query.WriteString(components.From)
408 | 
409 | 	// Build JOIN clauses
410 | 	for _, join := range components.Joins {
411 | 		query.WriteString(" ")
412 | 		query.WriteString(strings.ToUpper(join.Type))
413 | 		query.WriteString(" JOIN ")
414 | 		query.WriteString(join.Table)
415 | 		query.WriteString(" ON ")
416 | 		query.WriteString(join.On)
417 | 	}
418 | 
419 | 	// Build WHERE clause
420 | 	if len(components.Where) > 0 {
421 | 		query.WriteString(" WHERE ")
422 | 		for i, cond := range components.Where {
423 | 			if i > 0 {
424 | 				query.WriteString(" ")
425 | 				query.WriteString(cond.Connector)
426 | 				query.WriteString(" ")
427 | 			}
428 | 			query.WriteString(cond.Column)
429 | 			query.WriteString(" ")
430 | 			query.WriteString(cond.Operator)
431 | 			if cond.Value != "" {
432 | 				query.WriteString(" ")
433 | 				query.WriteString(cond.Value)
434 | 			}
435 | 		}
436 | 	}
437 | 
438 | 	// Build GROUP BY clause
439 | 	if len(components.GroupBy) > 0 {
440 | 		query.WriteString(" GROUP BY ")
441 | 		query.WriteString(strings.Join(components.GroupBy, ", "))
442 | 	}
443 | 
444 | 	// Build HAVING clause
445 | 	if len(components.Having) > 0 {
446 | 		query.WriteString(" HAVING ")
447 | 		query.WriteString(strings.Join(components.Having, " AND "))
448 | 	}
449 | 
450 | 	// Build ORDER BY clause
451 | 	if len(components.OrderBy) > 0 {
452 | 		query.WriteString(" ORDER BY ")
453 | 		var orders []string
454 | 		for _, order := range components.OrderBy {
455 | 			orders = append(orders, order.Column+" "+order.Direction)
456 | 		}
457 | 		query.WriteString(strings.Join(orders, ", "))
458 | 	}
459 | 
460 | 	// Build LIMIT clause
461 | 	if components.Limit > 0 {
462 | 		query.WriteString(fmt.Sprintf(" LIMIT %d", components.Limit))
463 | 	}
464 | 
465 | 	// Build OFFSET clause
466 | 	if components.Offset > 0 {
467 | 		query.WriteString(fmt.Sprintf(" OFFSET %d", components.Offset))
468 | 	}
469 | 
470 | 	return query.String(), nil
471 | }
472 | 
473 | // validateQuery validates a SQL query for syntax errors
474 | func validateQuery(ctx context.Context, db db.Database, query string) (interface{}, error) {
475 | 	// Validate query by attempting to execute it with EXPLAIN
476 | 	explainQuery := "EXPLAIN " + query
477 | 	_, err := db.Query(ctx, explainQuery)
478 | 	if err != nil {
479 | 		return map[string]interface{}{
480 | 			"valid": false,
481 | 			"error": err.Error(),
482 | 			"query": query,
483 | 		}, nil
484 | 	}
485 | 
486 | 	return map[string]interface{}{
487 | 		"valid": true,
488 | 		"query": query,
489 | 	}, nil
490 | }
491 | 
492 | // analyzeQueryPlan analyzes a specific query for performance
493 | func analyzeQueryPlan(ctx context.Context, db db.Database, query string) (interface{}, error) {
494 | 	explainQuery := "EXPLAIN (FORMAT JSON, ANALYZE, BUFFERS) " + query // NOTE: PostgreSQL-specific EXPLAIN options; MySQL's EXPLAIN does not accept this form
495 | 	rows, err := db.Query(ctx, explainQuery)
496 | 	if err != nil {
497 | 		return nil, fmt.Errorf("failed to analyze query: %w", err)
498 | 	}
499 | 	defer func() {
500 | 		if err := rows.Close(); err != nil {
501 | 			logger.Error("error closing rows: %v", err)
502 | 		}
503 | 	}()
504 | 
505 | 	var plan []byte
506 | 	if !rows.Next() {
507 | 		return nil, fmt.Errorf("no explain plan returned")
508 | 	}
509 | 	if err := rows.Scan(&plan); err != nil {
510 | 		return nil, fmt.Errorf("failed to scan explain plan: %w", err)
511 | 	}
512 | 
513 | 	return map[string]interface{}{
514 | 		"query": query,
515 | 		"plan":  string(plan),
516 | 	}, nil
517 | }
518 | 
519 | // Helper function to calculate query complexity
520 | func calculateQueryComplexity(query string) string {
521 | 	query = strings.ToUpper(query)
522 | 
523 | 	// Count common complexity factors
524 | 	joins := strings.Count(query, " JOIN ")
525 | 	subqueries := strings.Count(query, "SELECT") - 1 // Subtract the main query
526 | 	if subqueries < 0 {
527 | 		subqueries = 0
528 | 	}
529 | 
530 | 	aggregations := strings.Count(query, " SUM(") +
531 | 		strings.Count(query, " COUNT(") +
532 | 		strings.Count(query, " AVG(") +
533 | 		strings.Count(query, " MIN(") +
534 | 		strings.Count(query, " MAX(")
535 | 	groupBy := strings.Count(query, " GROUP BY ")
536 | 	orderBy := strings.Count(query, " ORDER BY ")
537 | 	having := strings.Count(query, " HAVING ")
538 | 	distinct := strings.Count(query, " DISTINCT ")
539 | 	unions := strings.Count(query, " UNION ")
540 | 
541 | 	// Calculate complexity score - adjusted to match test expectations
542 | 	score := joins*2 + (subqueries * 3) + aggregations + groupBy + orderBy + having*2 + distinct + unions*3
543 | 
544 | 	// Check special cases that should be complex
545 | 	if joins >= 3 || (joins >= 2 && subqueries >= 1) || (subqueries >= 1 && aggregations >= 1) {
546 | 		return "Complex"
547 | 	}
548 | 
549 | 	// Determine complexity level
550 | 	if score <= 2 {
551 | 		return "Simple"
552 | 	} else if score <= 6 {
553 | 		return "Moderate"
554 | 	} else {
555 | 		return "Complex"
556 | 	}
557 | }
558 | 
559 | // Helper functions to extract error information from error messages
560 | func getSuggestionForError(errorMsg string) string {
561 | 	errorMsg = strings.ToLower(errorMsg)
562 | 
563 | 	if strings.Contains(errorMsg, "syntax error") {
564 | 		return "Check SQL syntax for errors such as missing keywords, incorrect operators, or unmatched parentheses"
565 | 	} else if strings.Contains(errorMsg, "unknown column") {
566 | 		return "Column name is incorrect or doesn't exist in the specified table"
567 | 	} else if strings.Contains(errorMsg, "unknown table") {
568 | 		return "Table name is incorrect or doesn't exist in the database"
569 | 	} else if strings.Contains(errorMsg, "ambiguous") {
570 | 		return "Column name is ambiguous. Qualify it with the table name"
571 | 	} else if strings.Contains(errorMsg, "missing") && strings.Contains(errorMsg, "from") {
572 | 		return "FROM clause is missing or incorrectly formatted"
573 | 	} else if strings.Contains(errorMsg, "no such table") {
574 | 		return "Table specified does not exist in the database"
575 | 	}
576 | 
577 | 	return "Review the query syntax and structure"
578 | }
579 | 
580 | // extractLineNumberFromError extracts line number from a database error message
581 | //
582 | //nolint:unused // Used in future implementation
583 | func extractLineNumberFromError(errMsg string) int {
584 | 	// Check for line number patterns like "at line 42" or "line 42"
585 | 	linePatterns := []string{
586 | 		"at line ([0-9]+)",
587 | 		"line ([0-9]+)",
588 | 		"LINE ([0-9]+)",
589 | 	}
590 | 
591 | 	for _, pattern := range linePatterns {
592 | 		lineMatch := regexp.MustCompile(pattern).FindStringSubmatch(errMsg)
593 | 		if len(lineMatch) > 1 {
594 | 			lineNum, scanErr := strconv.Atoi(lineMatch[1])
595 | 			if scanErr != nil {
596 | 				logger.Warn("Failed to parse line number: %v", scanErr)
597 | 				continue
598 | 			}
599 | 			return lineNum
600 | 		}
601 | 	}
602 | 
603 | 	return 0
604 | }
605 | 
606 | // extractPositionFromError extracts position from a database error message
607 | //
608 | //nolint:unused // Used in future implementation
609 | func extractPositionFromError(errMsg string) int {
610 | 	// Check for position patterns
611 | 	posPatterns := []string{
612 | 		"at character ([0-9]+)",
613 | 		"position ([0-9]+)",
614 | 		"at or near \"([^\"]+)\"",
615 | 	}
616 | 
617 | 	for _, pattern := range posPatterns {
618 | 		posMatch := regexp.MustCompile(pattern).FindStringSubmatch(errMsg)
619 | 		if len(posMatch) > 1 {
620 | 			// For "at or near X" patterns, need to find X in the query
621 | 			if strings.Contains(pattern, "at or near") {
622 | 				return 0 // Just return 0 for now
623 | 			}
624 | 
625 | 			// For numeric positions
626 | 			pos, scanErr := strconv.Atoi(posMatch[1])
627 | 			if scanErr != nil {
628 | 				logger.Warn("Failed to parse position: %v", scanErr)
629 | 				continue
630 | 			}
631 | 			return pos
632 | 		}
633 | 	}
634 | 
635 | 	return 0
636 | }
637 | 
638 | // Mock functions for use when database is not available
639 | 
640 | // mockValidateQuery provides mock validation of SQL queries
641 | func mockValidateQuery(query string) (interface{}, error) {
642 | 	query = strings.TrimSpace(query)
643 | 
644 | 	// Basic syntax checks for demonstration purposes
645 | 	if !strings.HasPrefix(strings.ToUpper(query), "SELECT") {
646 | 		return map[string]interface{}{
647 | 			"valid":       false,
648 | 			"query":       query,
649 | 			"error":       "Query must start with SELECT",
650 | 			"suggestion":  "Begin your query with the SELECT keyword",
651 | 			"errorLine":   1,
652 | 			"errorColumn": 1,
653 | 		}, nil
654 | 	}
655 | 
656 | 	if !strings.Contains(strings.ToUpper(query), " FROM ") {
657 | 		return map[string]interface{}{
658 | 			"valid":       false,
659 | 			"query":       query,
660 | 			"error":       "Missing FROM clause",
661 | 			"suggestion":  "Add a FROM clause to specify the table or view to query",
662 | 			"errorLine":   1,
663 | 			"errorColumn": len("SELECT"),
664 | 		}, nil
665 | 	}
666 | 
667 | 	// Check for unbalanced parentheses
668 | 	if strings.Count(query, "(") != strings.Count(query, ")") {
669 | 		return map[string]interface{}{
670 | 			"valid":       false,
671 | 			"query":       query,
672 | 			"error":       "Unbalanced parentheses",
673 | 			"suggestion":  "Ensure all opening parentheses have matching closing parentheses",
674 | 			"errorLine":   1,
675 | 			"errorColumn": 0,
676 | 		}, nil
677 | 	}
678 | 
679 | 	// Check for unclosed quotes
680 | 	if strings.Count(query, "'")%2 != 0 {
681 | 		return map[string]interface{}{
682 | 			"valid":       false,
683 | 			"query":       query,
684 | 			"error":       "Unclosed string literal",
685 | 			"suggestion":  "Ensure all string literals are properly closed with matching quotes",
686 | 			"errorLine":   1,
687 | 			"errorColumn": 0,
688 | 		}, nil
689 | 	}
690 | 
691 | 	// Query appears valid
692 | 	return map[string]interface{}{
693 | 		"valid": true,
694 | 		"query": query,
695 | 	}, nil
696 | }
697 | 
698 | // mockAnalyzeQuery provides mock analysis of SQL queries
699 | func mockAnalyzeQuery(query string) (interface{}, error) {
700 | 	query = strings.ToUpper(query)
701 | 
702 | 	// Mock analysis results
703 | 	var issues []string
704 | 	var suggestions []string
705 | 
706 | 	// Check for potential performance issues
707 | 	if !strings.Contains(query, " WHERE ") {
708 | 		issues = append(issues, "Query has no WHERE clause")
709 | 		suggestions = append(suggestions, "Add a WHERE clause to filter results and improve performance")
710 | 	}
711 | 
712 | 	// Check for multiple joins
713 | 	joinCount := strings.Count(query, " JOIN ")
714 | 	if joinCount > 1 {
715 | 		issues = append(issues, "Query contains multiple joins")
716 | 		suggestions = append(suggestions, "Multiple joins can impact performance. Consider denormalizing or using indexed columns")
717 | 	}
718 | 
719 | 	if strings.Contains(query, " LIKE '%") || strings.Contains(query, "% LIKE") {
720 | 		issues = append(issues, "Query uses LIKE with leading wildcard")
721 | 		suggestions = append(suggestions, "Leading wildcards in LIKE conditions cannot use indexes. Consider alternative approaches")
722 | 	}
723 | 
724 | 	if strings.Contains(query, " ORDER BY ") && !strings.Contains(query, " LIMIT ") {
725 | 		issues = append(issues, "ORDER BY without LIMIT")
726 | 		suggestions = append(suggestions, "Consider adding a LIMIT clause to prevent sorting large result sets")
727 | 	}
728 | 
729 | 	// Create a mock explain plan
730 | 	mockExplainPlan := []map[string]interface{}{
731 | 		{
732 | 			"id":            1,
733 | 			"select_type":   "SIMPLE",
734 | 			"table":         getTableFromQuery(query),
735 | 			"type":          "ALL",
736 | 			"possible_keys": nil,
737 | 			"key":           nil,
738 | 			"key_len":       nil,
739 | 			"ref":           nil,
740 | 			"rows":          1000,
741 | 			"Extra":         "",
742 | 		},
743 | 	}
744 | 
745 | 	// If the query has a WHERE clause, assume it might use an index
746 | 	if strings.Contains(query, " WHERE ") {
747 | 		mockExplainPlan[0]["type"] = "range"
748 | 		mockExplainPlan[0]["possible_keys"] = "PRIMARY"
749 | 		mockExplainPlan[0]["key"] = "PRIMARY"
750 | 		mockExplainPlan[0]["key_len"] = 4
751 | 		mockExplainPlan[0]["rows"] = 100
752 | 	}
753 | 
754 | 	return map[string]interface{}{
755 | 		"query":       query,
756 | 		"explainPlan": mockExplainPlan,
757 | 		"issues":      issues,
758 | 		"suggestions": suggestions,
759 | 		"complexity":  calculateQueryComplexity(query),
760 | 		"is_mock":     true,
761 | 	}, nil
762 | }
763 | 
764 | // Helper function to extract table name from a query
765 | func getTableFromQuery(query string) string {
766 | 	queryUpper := strings.ToUpper(query)
767 | 
768 | 	// Try to find the table name after FROM
769 | 	fromIndex := strings.Index(queryUpper, " FROM ")
770 | 	if fromIndex == -1 {
771 | 		return "unknown_table"
772 | 	}
773 | 
774 | 	// Get the text after FROM
775 | 	afterFrom := query[fromIndex+6:]
776 | 	afterFromUpper := queryUpper[fromIndex+6:]
777 | 
778 | 	// Find the end of the table name (next space, comma, or parenthesis)
779 | 	endIndex := len(afterFrom)
780 | 	for i, char := range afterFromUpper {
781 | 		if char == ' ' || char == ',' || char == '(' || char == ')' {
782 | 			endIndex = i
783 | 			break
784 | 		}
785 | 	}
786 | 
787 | 	tableName := strings.TrimSpace(afterFrom[:endIndex])
788 | 
789 | 	// If there's an alias, remove it
790 | 	tableNameParts := strings.Split(tableName, " AS ")
791 | 	if len(tableNameParts) > 1 {
792 | 		return tableNameParts[0]
793 | 	}
794 | 
795 | 	return tableName
796 | }
797 | 
```
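
For orientation, here is a minimal, self-contained sketch of the string assembly that `buildQueryFromComponents` performs above. The `condition` and `orderBy` types are local stand-ins for the package's unexported `Condition` and `OrderBy` structs so the example compiles on its own; it is an illustration, not code from the repository:

```go
package main

import (
	"fmt"
	"strings"
)

// Local stand-ins for the types defined in querybuilder.go.
type condition struct{ Column, Operator, Value, Connector string }
type orderBy struct{ Column, Direction string }

func main() {
	selectCols := []string{"id", "name", "email"}
	from := "users"
	where := []condition{{Column: "status", Operator: "=", Value: "'active'"}}
	order := []orderBy{{Column: "name", Direction: "ASC"}}
	limit := 10

	var q strings.Builder
	q.WriteString("SELECT " + strings.Join(selectCols, ", "))
	q.WriteString(" FROM " + from)
	if len(where) > 0 {
		q.WriteString(" WHERE ")
		for i, c := range where {
			if i > 0 {
				q.WriteString(" " + c.Connector + " ")
			}
			q.WriteString(c.Column + " " + c.Operator)
			if c.Value != "" {
				q.WriteString(" " + c.Value)
			}
		}
	}
	if len(order) > 0 {
		parts := make([]string, 0, len(order))
		for _, o := range order {
			parts = append(parts, o.Column+" "+o.Direction)
		}
		q.WriteString(" ORDER BY " + strings.Join(parts, ", "))
	}
	if limit > 0 {
		q.WriteString(fmt.Sprintf(" LIMIT %d", limit))
	}

	// Output: SELECT id, name, email FROM users WHERE status = 'active' ORDER BY name ASC LIMIT 10
	fmt.Println(q.String())
}
```

Note that, as in the source, condition values are spliced into the query verbatim: the builder validates structure (via `validateQueryComponents` and the `EXPLAIN` round-trip) but does not parameterize values, so the assembled string should be treated as untrusted until validated.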

--------------------------------------------------------------------------------
/README-old.md:
--------------------------------------------------------------------------------

```markdown
  1 | <div align="center">
  2 | 
  3 | # Multi DB MCP Server
  4 | 
  5 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
  6 | [![Go Report Card](https://goreportcard.com/badge/github.com/FreePeak/db-mcp-server)](https://goreportcard.com/report/github.com/FreePeak/db-mcp-server)
  7 | [![Go Reference](https://pkg.go.dev/badge/github.com/FreePeak/db-mcp-server.svg)](https://pkg.go.dev/github.com/FreePeak/db-mcp-server)
  8 | [![Contributors](https://img.shields.io/github/contributors/FreePeak/db-mcp-server)](https://github.com/FreePeak/db-mcp-server/graphs/contributors)
  9 | 
 10 | <h3>A robust multi-database implementation of the Database Model Context Protocol (DB MCP)</h3>
 11 | 
 12 | [Features](#key-features) • [AI Benefits](#ai-integration-benefits) • [Installation](#installation) • [Usage](#usage) • [Documentation](#documentation) • [Contributing](#contributing) • [License](#license)
 13 | 
 14 | </div>
 15 | 
 16 | ---
 17 | 
 18 | ## 📋 Overview
 19 | 
  20 | The DB MCP Server is a high-performance implementation of the Database Model Context Protocol, designed to transform how AI agents interact with databases. By providing a standardized communication layer between AI models and database systems, it enables agents to discover, understand, and manipulate database structures with full context awareness. It currently supports MySQL and PostgreSQL, with plans to expand to the most widely used databases, including NoSQL solutions, closing the knowledge gap between AI agents and your data and enabling context-aware database operations that previously required human expertise.
 21 | 
 22 | ## ✨ Key Features
 23 | 
 24 | - **AI-Optimized Context Protocol**: Provides rich database context to AI agents, enabling them to reason about schema, relationships, and data patterns
 25 | - **Semantic Understanding Bridge**: Translates between natural language queries and database operations with full schema awareness
 26 | - **Contextual Database Operations**: Allows AI agents to execute database operations with full understanding of schema, constraints, and relationships
 27 | - **Multi-Database Support**: Currently supports MySQL and PostgreSQL with plans for expansion
 28 | - **Dynamic Tool Registry**: Register, discover, and invoke database tools at runtime via a standard protocol that AI agents can understand
 29 | - **Editor Integration**: First-class support for VS Code and Cursor extensions with AI-aware features
 30 | - **Schema-Aware Assistance**: Provides AI models with complete database structure knowledge for better suggestions
 31 | - **Performance Insights**: Delivers performance analytics that AI can leverage for optimization recommendations
 32 | 
 33 | ## 🧠 AI Integration Benefits
 34 | 
 35 | The DB MCP Server transforms how AI agents interact with databases in several key ways:
 36 | 
 37 | ### Enhanced Contextual Understanding
 38 | - **Schema Awareness**: AI agents gain complete knowledge of database tables, columns, relationships, and constraints
 39 | - **Semantic Relationship Mapping**: Enables AI to understand not just structure but meaning and purpose of data elements
 40 | - **Query Context Preservation**: Maintains context between related operations for coherent multi-step reasoning
 41 | 
 42 | ### Intelligent Database Operations
 43 | - **Natural Language to SQL**: Translates user intent into optimized database operations with full schema awareness
 44 | - **Context-Aware Query Generation**: Creates queries that respect database structure, types, and relationships
 45 | - **Error Prevention**: Understands database constraints before execution, preventing common errors
 46 | - **Optimization Suggestions**: Provides AI with execution metrics for intelligent query improvement recommendations
 47 | 
 48 | ### Workflow Optimization
 49 | - **Reduced Context Window Usage**: Supplies database structure efficiently, without consuming the AI's limited token context
 50 | - **Operation Chaining**: Enables complex multi-step operations with persistent context
 51 | - **Intelligent Defaults**: Suggests appropriate actions based on database structure and common patterns
 52 | - **Progressive Disclosure**: Reveals database complexity progressively as needed by the AI agent
 53 | 
 54 | ## 🚀 Installation
 55 | 
 56 | ### Prerequisites
 57 | 
 58 | - Go 1.18 or later
 59 | - Supported databases:
 60 |   - MySQL
 61 |   - PostgreSQL
 62 |   - (Additional databases in roadmap)
 63 | - Docker (optional, for containerized deployment)
 64 | 
 65 | ### Quick Start
 66 | 
 67 | ```bash
 68 | # Clone the repository
 69 | git clone https://github.com/FreePeak/db-mcp-server.git
 70 | cd db-mcp-server
 71 | 
 72 | # Copy and configure environment variables
 73 | cp .env.example .env
 74 | # Edit .env with your configuration
 75 | 
 76 | # Option 1: Build and run locally with SSE transport (default)
 77 | make build
 78 | ./mcp-server
 79 | 
 80 | # Option 2: Build and run with STDIO transport
 81 | make build
 82 | ./mcp-server -t stdio
 83 | 
 84 | # Option 3: Using Docker
 85 | docker build -t db-mcp-server .
 86 | docker run -p 9090:9090 db-mcp-server
 87 | 
 88 | # Option 4: Using Docker Compose (with MySQL)
 89 | docker-compose up -d
 90 | ```
 91 | 
 92 | ### Transport Modes
 93 | 
 94 | The server supports two transport modes:
 95 | 
 96 | 1. **SSE (Server-Sent Events)** - Default mode for browser and HTTP clients
 97 |    ```bash
 98 |    ./mcp-server -t sse
 99 |    ```
100 | 
101 | 2. **STDIO (Standard Input/Output)** - For command-line tools and integrations
102 |    ```bash
103 |    ./mcp-server -t stdio
104 |    ```
105 |    
106 | For STDIO mode, see the [examples directory](./examples) for usage examples.
107 | 
108 | ### Docker
109 | 
110 | ```bash
111 | # Build the Docker image
112 | docker build -t db-mcp-server .
113 | 
114 | # Run the container
115 | docker run -p 9090:9090 db-mcp-server
116 | 
117 | # Run with custom configuration
118 | docker run -p 8080:8080 \
119 |   -e SERVER_PORT=8080 \
120 |   -e LOG_LEVEL=debug \
121 |   -e DB_TYPE=mysql \
122 |   -e DB_HOST=my-database-server \
123 |   db-mcp-server
124 |   
125 | # Run with Docker Compose (includes MySQL database)
126 | docker-compose up -d
127 | ```
128 | 
129 | ## 🔧 Configuration
130 | 
131 | DB MCP Server can be configured via environment variables or a `.env` file:
132 | 
133 | | Variable | Description | Default |
134 | |----------|-------------|---------|
135 | | `SERVER_PORT` | Server port | `9092` |
136 | | `TRANSPORT_MODE` | Transport mode (stdio, sse) | `stdio` |
137 | | `LOG_LEVEL` | Logging level (debug, info, warn, error) | `debug` |
138 | | `DB_TYPE` | Database type (mysql, postgres) | `mysql` |
139 | | `DB_HOST` | Database host | `localhost` |
140 | | `DB_PORT` | Database port | `3306` |
141 | | `DB_USER` | Database username | `iamrevisto` |
142 | | `DB_PASSWORD` | Database password | `password` |
143 | | `DB_NAME` | Database name | `revisto` |
144 | | `DB_ROOT_PASSWORD` | Database root password (for container setup) | `root_password` |
145 | 
146 | See `.env.example` for more configuration options.
147 | 
148 | ## 📖 Usage
149 | 
150 | ### Integrating with the Cursor Editor and AI Agents
151 | 
152 | DB MCP Server creates a powerful bridge between your databases and AI assistants in the Cursor editor, enabling AI-driven database operations with full context awareness. Configure your Cursor settings in `.cursor/mcp.json`:
153 | 
154 | ```json
155 | {
156 |     "mcpServers": {
157 |         "db-mcp-server": {
158 |             "url": "http://localhost:9090/sse"
159 |         }
160 |     }
161 | }
162 | ```
163 | 
164 | To leverage AI-powered database operations:
165 | 
166 | 1. Configure and start the DB MCP Server using one of the installation methods above
167 | 2. Add the configuration to your Cursor settings
168 | 3. Open Cursor and navigate to a SQL or code file
169 | 4. The AI assistant now has access to your database schema, relationships, and capabilities
170 | 5. Ask the AI to generate, explain, or optimize database queries with full schema awareness
171 | 6. Execute AI-generated queries directly from Cursor
172 | 
173 | The MCP Server enhances AI assistant capabilities with:
174 | 
175 | - Complete database schema understanding
176 | - Relationship-aware query generation
177 | - Intelligent query optimization recommendations
178 | - Error prevention through constraint awareness
179 | - Performance metrics for better suggestions
180 | - Context persistence across multiple operations
181 | 
182 | ### Example AI Interactions
183 | 
184 | ```
185 | # Ask the AI for schema information
186 | "What tables are in the database and how are they related?"
187 | 
188 | # Request query generation with context
189 | "Create a query to find all orders from customers in California with items over $100"
190 | 
191 | # Get optimization suggestions
192 | "How can I optimize this query that's taking too long to execute?"
193 | 
194 | # Request complex data operations
195 | "Help me create a transaction that updates inventory levels when an order is placed"
196 | ```
197 | 
198 | ## 📚 Documentation
199 | 
200 | ### DB MCP Protocol for AI Integration
201 | 
202 | The server implements the DB MCP protocol with methods specifically designed to enhance AI agent capabilities:
203 | 
204 | - **initialize**: Sets up the session, transmits schema context, and returns server capabilities
205 | - **tools/list**: Enables AI agents to discover available database tools dynamically
206 | - **tools/call**: Allows AI to execute database tools with full context
207 | - **editor/context**: Updates the server with editor context for better AI awareness
208 | - **schema/explore**: Provides AI with detailed database structure information
209 | - **cancel**: Cancels an in-progress operation
210 | 
211 | For full protocol documentation, visit the [MCP Specification](https://github.com/microsoft/mcp) and our database-specific extensions for AI integration.
212 | 
213 | ### Tool System
214 | 
215 | The DB MCP Server includes a powerful AI-aware tool system that provides large language models and AI assistants with a structured way to discover and invoke database tools. Each tool has:
216 | 
217 | - A unique name discoverable by AI
218 | - A comprehensive description that AI can understand
219 | - A JSON Schema for input validation and AI parameter generation
220 | - A structured output format that AI can parse and reason about
221 | - A handler function that executes the tool's logic with context awareness
222 | 
223 | This structure enables AI agents to intelligently select, parameterize, and invoke the right database operations without requiring hard-coded knowledge of your specific database schema.
224 | 
225 | ### Built-in Tools for AI Integration
226 | 
227 | The server includes AI-optimized database tools that provide rich context and capabilities:
228 | 
229 | | Tool | Description | AI Benefits |
230 | |------|-------------|------------|
231 | | `dbQuery` | Executes read-only SQL queries with parameterized inputs | Enables AI to retrieve data with full schema knowledge |
232 | | `dbExecute` | Performs data modification operations (INSERT, UPDATE, DELETE) | Allows AI to safely modify data with constraint awareness |
233 | | `dbTransaction` | Manages SQL transactions with commit and rollback support | Supports AI in creating complex multi-step operations |
234 | | `dbSchema` | Auto-discovers database structure and relationships | Provides AI with complete schema context for reasoning |
235 | | `dbQueryBuilder` | Visual SQL query construction with syntax validation | Helps AI create syntactically correct queries |
236 | | `dbPerformanceAnalyzer` | Identifies slow queries and provides optimization suggestions | Enables AI to suggest performance improvements |
237 | | `showConnectedDatabases` | Shows information about all connected databases | Enables AI to understand available database connections and their status |
238 | 
239 | ### Multiple Database Support
240 | 
241 | DB MCP Server supports connecting to multiple databases simultaneously, allowing AI agents to work across different database systems in a unified way. Each database connection is identified by a unique ID that can be referenced when using database tools.
242 | 
243 | #### Configuring Multiple Databases
244 | In your `.env` file:
245 | ```
246 | # Multi-Database Configuration
247 | DB_CONFIG_FILE=config/databases.json
248 | ```
249 | Configure multiple database connections in your `db-mcp-server/config/databases.json` file or environment variables:
250 | 
251 | ```
252 | # Multiple Database Configuration
253 | {
254 |   "connections": [
255 |     {
256 |       "id": "mysql1",
257 |       "type": "mysql",
258 |       "host": "localhost",
259 |       "port": 13306,
260 |       "user": "user1",
261 |       "password": "password1",
262 |       "name": "db1"
263 |     },
264 |     {
265 |       "id": "mysql2",
266 |       "type": "mysql",
267 |       "host": "localhost",
268 |       "port": 13307,
269 |       "user": "user3",
270 |       "password": "password3",
271 |       "name": "db3"
272 |     },
273 |     {
274 |       "id": "postgres1",
275 |       "type": "postgres",
276 |       "host": "localhost",
277 |       "port": 15432,
278 |       "user": "user2",
279 |       "password": "password2",
280 |       "name": "db2"
281 |     }
282 |   ]
283 | } 
284 | ```
285 | 
286 | #### Viewing Connected Databases
287 | 
288 | Use the `showConnectedDatabases` tool to see all connected databases with their status and connection information:
289 | 
290 | ```json
291 | // Get information about all connected databases
292 | {
293 |   "name": "showConnectedDatabases"
294 | }
295 | ```
296 | 
297 | Example response:
298 | 
299 | ```json
300 | [
301 |   {
302 |     "id": "mysql1",
303 |     "type": "mysql",
304 |     "host": "localhost",
305 |     "port": 3306,
306 |     "database": "db1",
307 |     "status": "connected",
308 |     "latency": "1.2ms"
309 |   },
310 |   {
311 |     "id": "postgres1",
312 |     "type": "postgres",
313 |     "host": "localhost",
314 |     "port": 5432,
315 |     "database": "db2",
316 |     "status": "connected",
317 |     "latency": "0.8ms"
318 |   }
319 | ]
320 | ```
321 | 
322 | #### Specifying Database for Operations
323 | 
324 | When using database tools, you must specify which database to use with the `database` parameter:
325 | 
326 | ```json
327 | // Query a specific database by ID
328 | {
329 |   "name": "dbQuery",
330 |   "arguments": {
331 |     "database": "postgres1",
332 |     "query": "SELECT * FROM users LIMIT 10"
333 |   }
334 | }
335 | 
336 | // Execute statement on a specific database
337 | {
338 |   "name": "dbExecute",
339 |   "arguments": {
340 |     "database": "mysql2",
341 |     "statement": "UPDATE products SET stock = stock - 1 WHERE id = 5"
342 |   }
343 | }
344 | 
345 | // Get schema from a specific database
346 | {
347 |   "name": "dbSchema",
348 |   "arguments": {
349 |     "database": "mysql1",
350 |     "component": "tables"
351 |   }
352 | }
353 | ```
354 | 
355 | > **Note**: Always use `database` as the parameter name when specifying which database to use. This is required for all database operation tools.
356 | 
357 | If your configuration has only one database connection, you must still provide the database ID that matches the ID in your configuration.
358 | 
359 | ### Database Schema Explorer Tool
360 | 
361 | The MCP Server includes an AI-aware Database Schema Explorer tool (`dbSchema`) that provides AI models with complete database structural knowledge:
362 | 
363 | ```json
364 | // Get all tables in the database - enables AI to understand available data entities
365 | {
366 |   "name": "dbSchema",
367 |   "arguments": {
368 |     "database": "mysql1",
369 |     "component": "tables"
370 |   }
371 | }
372 | 
373 | // Get columns for a specific table - gives AI detailed field information
374 | {
375 |   "name": "dbSchema",
376 |   "arguments": {
377 |     "database": "postgres1",
378 |     "component": "columns",
379 |     "table": "users"
380 |   }
381 | }
382 | 
383 | // Get relationships for a specific table or all relationships - helps AI understand data connections
384 | {
385 |   "name": "dbSchema",
386 |   "arguments": {
387 |     "database": "mysql1",
388 |     "component": "relationships",
389 |     "table": "orders"
390 |   }
391 | }
392 | 
393 | // Get the full database schema - provides AI with comprehensive structural context
394 | {
395 |   "name": "dbSchema",
396 |   "arguments": {
397 |     "database": "postgres1",
398 |     "component": "full"
399 |   }
400 | }
401 | ```
402 | 
403 | The Schema Explorer supports both MySQL and PostgreSQL databases, automatically adapting to your configured database type and providing AI with the appropriate contextual information.
404 | 
405 | ### Visual Query Builder Tool
406 | 
407 | The MCP Server includes a powerful Visual Query Builder tool (`dbQueryBuilder`) that helps you construct SQL queries with syntax validation:
408 | 
409 | ```json
410 | // Validate a SQL query for syntax errors
411 | {
412 |   "name": "dbQueryBuilder",
413 |   "arguments": {
414 |     "database": "mysql1",
415 |     "action": "validate",
416 |     "query": "SELECT * FROM users WHERE status = 'active'"
417 |   }
418 | }
419 | 
420 | // Build a SQL query from components
421 | {
422 |   "name": "dbQueryBuilder",
423 |   "arguments": {
424 |     "database": "postgres1",
425 |     "action": "build",
426 |     "components": {
427 |       "select": ["id", "name", "email"],
428 |       "from": "users",
429 |       "where": [
430 |         {
431 |           "column": "status",
432 |           "operator": "=",
433 |           "value": "active"
434 |         }
435 |       ],
436 |       "orderBy": [
437 |         {
438 |           "column": "name",
439 |           "direction": "ASC"
440 |         }
441 |       ],
442 |       "limit": 10
443 |     }
444 |   }
445 | }
446 | 
447 | // Analyze a SQL query for potential issues and performance
448 | {
449 |   "name": "dbQueryBuilder",
450 |   "arguments": {
451 |     "database": "mysql1",
452 |     "action": "analyze",
453 |     "query": "SELECT u.*, o.* FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = 'active' AND o.created_at > '2023-01-01'"
454 |   }
455 | }
456 | ```
457 | 
458 | Example response from a query build operation:
459 | 
460 | ```json
461 | {
462 |   "query": "SELECT id, name, email FROM users WHERE status = 'active' ORDER BY name ASC LIMIT 10",
463 |   "components": {
464 |     "select": ["id", "name", "email"],
465 |     "from": "users",
466 |     "where": [{
467 |       "column": "status",
468 |       "operator": "=",
469 |       "value": "active"
470 |     }],
471 |     "orderBy": [{
472 |       "column": "name",
473 |       "direction": "ASC"
474 |     }],
475 |     "limit": 10
476 |   },
477 |   "validation": {
478 |     "valid": true,
479 |     "query": "SELECT id, name, email FROM users WHERE status = 'active' ORDER BY name ASC LIMIT 10"
480 |   }
481 | }
482 | ```
483 | 
484 | The Query Builder supports:
485 | - SELECT statements with multiple columns
486 | - JOIN operations (inner, left, right, full)
487 | - WHERE conditions with various operators
488 | - GROUP BY and HAVING clauses
489 | - ORDER BY with sorting direction
490 | - LIMIT and OFFSET for pagination
491 | - Syntax validation and error suggestions
492 | - Query complexity analysis
493 | 
494 | ### Performance Analyzer Tool
495 | 
496 | The MCP Server includes a powerful Performance Analyzer tool (`dbPerformanceAnalyzer`) that identifies slow queries and provides optimization suggestions:
497 | 
498 | ```json
499 | // Get slow queries that exceed the configured threshold
500 | {
501 |   "name": "dbPerformanceAnalyzer",
502 |   "arguments": {
503 |     "database": "mysql1",
504 |     "action": "getSlowQueries",
505 |     "limit": 5
506 |   }
507 | }
508 | 
509 | // Get metrics for all tracked queries sorted by average duration
510 | {
511 |   "name": "dbPerformanceAnalyzer",
512 |   "arguments": {
513 |     "database": "postgres1",
514 |     "action": "getMetrics",
515 |     "limit": 10
516 |   }
517 | }
518 | 
519 | // Analyze a specific query for optimization opportunities
520 | {
521 |   "name": "dbPerformanceAnalyzer",
522 |   "arguments": {
523 |     "database": "mysql1",
524 |     "action": "analyzeQuery",
525 |     "query": "SELECT * FROM orders JOIN users ON orders.user_id = users.id WHERE orders.status = 'pending'"
526 |   }
527 | }
528 | 
529 | // Reset all collected performance metrics
530 | {
531 |   "name": "dbPerformanceAnalyzer",
532 |   "arguments": {
533 |     "database": "postgres1",
534 |     "action": "reset"
535 |   }
536 | }
537 | 
538 | // Set the threshold for identifying slow queries (in milliseconds)
539 | {
540 |   "name": "dbPerformanceAnalyzer",
541 |   "arguments": {
542 |     "database": "mysql1",
543 |     "action": "setThreshold",
544 |     "threshold": 300
545 |   }
546 | }
547 | ```
548 | 
549 | Example response from a performance analysis:
550 | 
551 | ```json
552 | {
553 |   "query": "SELECT * FROM orders JOIN users ON orders.user_id = users.id WHERE orders.status = 'pending'",
554 |   "suggestions": [
555 |     "Avoid using SELECT * - specify only the columns you need",
556 |     "Verify that ORDER BY columns are properly indexed",
557 |     "Consider adding appropriate indexes for frequently queried columns"
558 |   ]
559 | }
560 | ```
561 | 
562 | Example response from getting slow queries:
563 | 
564 | ```json
565 | {
566 |   "metrics": [
567 |     {
568 |       "query": "SELECT * FROM large_table WHERE status = ?",
569 |       "count": 15,
570 |       "totalDuration": "2.5s",
571 |       "minDuration": "120ms",
572 |       "maxDuration": "750ms",
573 |       "avgDuration": "166ms",
574 |       "lastExecuted": "2025-06-15T14:23:45Z"
575 |     },
576 |     {
577 |       "query": "SELECT order_id, SUM(amount) FROM order_items GROUP BY order_id",
578 |       "count": 8,
579 |       "totalDuration": "1.2s",
580 |       "minDuration": "110ms",
581 |       "maxDuration": "580ms",
582 |       "avgDuration": "150ms",
583 |       "lastExecuted": "2025-06-15T14:20:12Z"
584 |     }
585 |   ],
586 |   "count": 2,
587 |   "threshold": "100ms"
588 | }
589 | ```
590 | 
591 | The Performance Analyzer automatically tracks all query executions and provides:
592 | - Identification of slow-performing queries
593 | - Query execution metrics (count, min, max, average durations)
594 | - Pattern-based query analysis
595 | - Optimization suggestions
596 | - Performance trend monitoring
597 | - Configurable slow query thresholds
598 | 
599 | ### Database Transactions Tool
600 | 
601 | For operations that require transaction support, use the `dbTransaction` tool:
602 | 
603 | ```json
604 | // Begin a transaction
605 | {
606 |   "name": "dbTransaction",
607 |   "arguments": {
608 |     "database": "mysql1",
609 |     "action": "begin",
610 |     "readOnly": false
611 |   }
612 | }
613 | 
614 | // Execute a statement within the transaction
615 | {
616 |   "name": "dbTransaction",
617 |   "arguments": {
618 |     "database": "mysql1",
619 |     "action": "execute",
620 |     "transactionId": "tx-1684785421293", // ID returned from the begin operation
621 |     "statement": "INSERT INTO orders (customer_id, amount) VALUES (?, ?)",
622 |     "params": ["123", "450.00"]
623 |   }
624 | }
625 | 
626 | // Commit the transaction
627 | {
628 |   "name": "dbTransaction",
629 |   "arguments": {
630 |     "database": "mysql1",
631 |     "action": "commit",
632 |     "transactionId": "tx-1684785421293"
633 |   }
634 | }
635 | 
636 | // Rollback the transaction (in case of errors)
637 | {
638 |   "name": "dbTransaction",
639 |   "arguments": {
640 |     "database": "mysql1",
641 |     "action": "rollback",
642 |     "transactionId": "tx-1684785421293"
643 |   }
644 | }
645 | ```
646 | 
647 | ### Editor Integration
648 | 
649 | The server includes support for editor-specific features through the `editor/context` method, enabling tools to be aware of:
650 | 
651 | - Current SQL file
652 | - Selected query
653 | - Cursor position
654 | - Open database connections
655 | - Database structure
656 | 
657 | ## 🗺️ Roadmap
658 | 
659 | We're committed to expanding DB MCP Server's AI integration capabilities. Here's our planned development roadmap:
660 | 
661 | ### Q2 2025
662 | - ✅ **AI-Aware Schema Explorer** - Auto-discover database structure and relationships for AI context
663 | - ✅ **Context-Aware Query Builder** - AI-driven SQL query construction with syntax validation
664 | - ✅ **Performance Analyzer with AI Insights** - Identify optimization opportunities with AI recommendations
665 | 
666 | ### Q3 2025
667 | - **AI-Powered Data Visualization** - Create charts and graphs from query results with AI suggestions
668 | - **AI-Driven Model Generator** - Auto-generate code models from database tables using AI patterns
669 | - **Multi-DB Support Expansion with Cross-DB AI Reasoning** - Add support for new databases, with AI that understands:
670 |   - **MongoDB** - Document-oriented schema for AI reasoning
671 |   - **Redis** - Key-value pattern recognition for AI
672 |   - **SQLite** - Lightweight database understanding
673 | 
674 | ### Q4 2025
675 | - **AI-Assisted Migration Manager** - Version-controlled schema changes with AI recommendations
676 | - **Intelligent Access Control** - AI-aware permissions for database operations
677 | - **Context-Enriched Query History** - Track queries with execution metrics for AI learning
678 | - **Additional Database Integrations with AI Context**:
679 |   - **Cassandra** - Distributed schema understanding
680 |   - **Elasticsearch** - Search-optimized AI interactions
681 |   - **DynamoDB** - NoSQL reasoning capabilities
682 |   - **Oracle** - Enterprise schema comprehension
683 | 
684 | ### Future Vision
685 | - **Complete Database Coverage with Unified AI Context** - Support for all major databases with consistent AI interface
686 | - **AI-Assisted Query Optimization** - Smart recommendations using machine learning
687 | - **Cross-Database AI Operations** - Unified interface for heterogeneous database environments
688 | - **Real-Time Collaborative AI** - Multi-user AI assistance for collaborative database work
689 | - **AI-Powered Plugin System** - Community-driven extension marketplace with AI discovery
690 | 
691 | ## 🤝 Contributing
692 | 
693 | Contributions are welcome! Here's how you can help:
694 | 
695 | 1. **Fork** the repository
696 | 2. **Create** a feature branch: `git checkout -b new-feature`
697 | 3. **Commit** your changes: `git commit -am 'Add new feature'` 
698 | 4. **Push** to the branch: `git push origin new-feature`
699 | 5. **Submit** a pull request
700 | 
701 | Please make sure your code follows our coding standards and includes appropriate tests.
702 | 
703 | ## 📝 License
704 | 
705 | This project is licensed under the MIT License - see the LICENSE file for details.
706 | 
707 | ## 📧 Support & Contact
708 | 
709 | - For questions or issues, email [[email protected]](mailto:[email protected])
710 | - Open an issue directly: [Issue Tracker](https://github.com/FreePeak/db-mcp-server/issues)
711 | - If DB MCP Server helps your work, please consider supporting:
712 | 
713 | <p align="center">
714 | <a href="https://www.buymeacoffee.com/linhdmn">
715 | <img src="https://img.buymeacoffee.com/button-api/?text=Support DB MCP Server&emoji=☕&slug=linhdmn&button_colour=FFDD00&font_colour=000000&font_family=Cookie&outline_colour=000000&coffee_colour=ffffff" 
716 | alt="Buy Me A Coffee"/>
717 | </a>
718 | </p>
```

--------------------------------------------------------------------------------
/internal/delivery/mcp/timescale_completion.go:
--------------------------------------------------------------------------------

```go
  1 | package mcp
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"fmt"
  6 | )
  7 | 
  8 | // CompletionItem represents a code completion item
  9 | type CompletionItem struct {
 10 | 	Name             string   `json:"name"`
 11 | 	Type             string   `json:"type"`
 12 | 	Documentation    string   `json:"documentation"`
 13 | 	InsertText       string   `json:"insertText"`
 14 | 	Parameters       []string `json:"parameters,omitempty"`
 15 | 	ReturnType       string   `json:"returnType,omitempty"`
 16 | 	Category         string   `json:"category,omitempty"`
 17 | 	SortText         string   `json:"sortText,omitempty"`
 18 | 	FilterText       string   `json:"filterText,omitempty"`
 19 | 	CommitCharacters []string `json:"commitCharacters,omitempty"`
 20 | }
 21 | 
 22 | // QuerySuggestion represents a suggested query template for TimescaleDB
 23 | type QuerySuggestion struct {
 24 | 	Title       string `json:"title"`
 25 | 	Description string `json:"description"`
 26 | 	Query       string `json:"query"`
 27 | 	Category    string `json:"category"`
 28 | }
 29 | 
 30 | // TimescaleDBCompletionProvider provides code completion for TimescaleDB functions
 31 | type TimescaleDBCompletionProvider struct {
 32 | 	contextProvider *TimescaleDBContextProvider
 33 | }
 34 | 
 35 | // NewTimescaleDBCompletionProvider creates a new TimescaleDB completion provider
 36 | func NewTimescaleDBCompletionProvider() *TimescaleDBCompletionProvider {
 37 | 	return &TimescaleDBCompletionProvider{
 38 | 		contextProvider: NewTimescaleDBContextProvider(),
 39 | 	}
 40 | }
 41 | 
 42 | // GetTimeBucketCompletions returns completions for time_bucket functions
 43 | func (p *TimescaleDBCompletionProvider) GetTimeBucketCompletions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]CompletionItem, error) {
 44 | 	// First check if TimescaleDB is available
 45 | 	tsdbContext, err := p.contextProvider.DetectTimescaleDB(ctx, dbID, useCase)
 46 | 	if err != nil {
 47 | 		return nil, fmt.Errorf("failed to detect TimescaleDB: %w", err)
 48 | 	}
 49 | 
 50 | 	if !tsdbContext.IsTimescaleDB {
 51 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
 52 | 	}
 53 | 
 54 | 	// Define time bucket function completions
 55 | 	completions := []CompletionItem{
 56 | 		{
 57 | 			Name:             "time_bucket",
 58 | 			Type:             "function",
 59 | 			Documentation:    "TimescaleDB function that groups time into buckets. Useful for downsampling time-series data.",
 60 | 			InsertText:       "time_bucket($1, $2)",
 61 | 			Parameters:       []string{"interval", "timestamp"},
 62 | 			ReturnType:       "timestamp",
 63 | 			Category:         "TimescaleDB",
 64 | 			CommitCharacters: []string{"("},
 65 | 		},
 66 | 		{
 67 | 			Name:             "time_bucket_gapfill",
 68 | 			Type:             "function",
 69 | 			Documentation:    "TimescaleDB function similar to time_bucket but fills in missing values (gaps) in the result.",
 70 | 			InsertText:       "time_bucket_gapfill($1, $2)",
 71 | 			Parameters:       []string{"interval", "timestamp"},
 72 | 			ReturnType:       "timestamp",
 73 | 			Category:         "TimescaleDB",
 74 | 			CommitCharacters: []string{"("},
 75 | 		},
 76 | 		{
 77 | 			Name:             "time_bucket_ng",
 78 | 			Type:             "function",
 79 | 			Documentation:    "TimescaleDB next-generation time bucket function that supports timezone-aware bucketing.",
 80 | 			InsertText:       "time_bucket_ng('$1', $2)",
 81 | 			Parameters:       []string{"interval", "timestamp", "timezone"},
 82 | 			ReturnType:       "timestamp with time zone",
 83 | 			Category:         "TimescaleDB",
 84 | 			CommitCharacters: []string{"("},
 85 | 		},
 86 | 		{
 87 | 			Name:             "time_bucket",
 88 | 			Type:             "function",
 89 | 			Documentation:    "TimescaleDB function that groups time into buckets with an offset.",
 90 | 			InsertText:       "time_bucket($1, $2, $3)",
 91 | 			Parameters:       []string{"interval", "timestamp", "offset"},
 92 | 			ReturnType:       "timestamp",
 93 | 			Category:         "TimescaleDB",
 94 | 			CommitCharacters: []string{"("},
 95 | 		},
 96 | 	}
 97 | 
 98 | 	// Add version information to documentation
 99 | 	for i := range completions {
100 | 		completions[i].Documentation = fmt.Sprintf("TimescaleDB v%s: %s", tsdbContext.Version, completions[i].Documentation)
101 | 	}
102 | 
103 | 	return completions, nil
104 | }
105 | 
106 | // GetHypertableFunctionCompletions returns completions for hypertable management functions
107 | func (p *TimescaleDBCompletionProvider) GetHypertableFunctionCompletions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]CompletionItem, error) {
108 | 	// First check if TimescaleDB is available
109 | 	tsdbContext, err := p.contextProvider.DetectTimescaleDB(ctx, dbID, useCase)
110 | 	if err != nil {
111 | 		return nil, fmt.Errorf("failed to detect TimescaleDB: %w", err)
112 | 	}
113 | 
114 | 	if !tsdbContext.IsTimescaleDB {
115 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
116 | 	}
117 | 
118 | 	// Define hypertable function completions
119 | 	completions := []CompletionItem{
120 | 		{
121 | 			Name:             "create_hypertable",
122 | 			Type:             "function",
123 | 			Documentation:    "TimescaleDB function that converts a standard PostgreSQL table into a hypertable partitioned by time.",
124 | 			InsertText:       "create_hypertable('$1', '$2')",
125 | 			Parameters:       []string{"table_name", "time_column_name"},
126 | 			ReturnType:       "void",
127 | 			Category:         "TimescaleDB",
128 | 			CommitCharacters: []string{"("},
129 | 		},
130 | 		{
131 | 			Name:             "add_dimension",
132 | 			Type:             "function",
133 | 			Documentation:    "TimescaleDB function that adds another dimension to a hypertable for partitioning.",
134 | 			InsertText:       "add_dimension('$1', '$2')",
135 | 			Parameters:       []string{"hypertable", "column_name"},
136 | 			ReturnType:       "void",
137 | 			Category:         "TimescaleDB",
138 | 			CommitCharacters: []string{"("},
139 | 		},
140 | 		{
141 | 			Name:             "add_compression_policy",
142 | 			Type:             "function",
143 | 			Documentation:    "TimescaleDB function that adds an automatic compression policy to a hypertable.",
144 | 			InsertText:       "add_compression_policy('$1', INTERVAL '$2')",
145 | 			Parameters:       []string{"hypertable", "older_than"},
146 | 			ReturnType:       "integer",
147 | 			Category:         "TimescaleDB",
148 | 			CommitCharacters: []string{"("},
149 | 		},
150 | 		{
151 | 			Name:             "add_retention_policy",
152 | 			Type:             "function",
153 | 			Documentation:    "TimescaleDB function that adds an automatic data retention policy to a hypertable.",
154 | 			InsertText:       "add_retention_policy('$1', INTERVAL '$2')",
155 | 			Parameters:       []string{"hypertable", "drop_after"},
156 | 			ReturnType:       "integer",
157 | 			Category:         "TimescaleDB",
158 | 			CommitCharacters: []string{"("},
159 | 		},
160 | 		{
161 | 			Name:             "alter_job",
162 | 			Type:             "function",
163 | 			Documentation:    "TimescaleDB function that alters a policy job's schedule or configuration.",
164 | 			InsertText:       "alter_job($1, scheduled => true)",
165 | 			Parameters:       []string{"job_id"},
166 | 			ReturnType:       "integer",
167 | 			Category:         "TimescaleDB",
168 | 			CommitCharacters: []string{"("},
169 | 		},
170 | 		{
171 | 			Name:             "hypertable_size",
172 | 			Type:             "function",
173 | 			Documentation:    "TimescaleDB function that shows the size of a hypertable, including all chunks.",
174 | 			InsertText:       "hypertable_size('$1')",
175 | 			Parameters:       []string{"hypertable"},
176 | 			ReturnType:       "bigint",
177 | 			Category:         "TimescaleDB",
178 | 			CommitCharacters: []string{"("},
179 | 		},
180 | 		{
181 | 			Name:             "hypertable_detailed_size",
182 | 			Type:             "function",
183 | 			Documentation:    "TimescaleDB function that shows detailed size information for a hypertable.",
184 | 			InsertText:       "hypertable_detailed_size('$1')",
185 | 			Parameters:       []string{"hypertable"},
186 | 			ReturnType:       "table",
187 | 			Category:         "TimescaleDB",
188 | 			CommitCharacters: []string{"("},
189 | 		},
190 | 	}
191 | 
192 | 	// Add version information to documentation
193 | 	for i := range completions {
194 | 		completions[i].Documentation = fmt.Sprintf("TimescaleDB v%s: %s", tsdbContext.Version, completions[i].Documentation)
195 | 	}
196 | 
197 | 	return completions, nil
198 | }
199 | 
200 | // GetContinuousAggregateFunctionCompletions returns completions for continuous aggregate functions
201 | func (p *TimescaleDBCompletionProvider) GetContinuousAggregateFunctionCompletions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]CompletionItem, error) {
202 | 	// First check if TimescaleDB is available
203 | 	tsdbContext, err := p.contextProvider.DetectTimescaleDB(ctx, dbID, useCase)
204 | 	if err != nil {
205 | 		return nil, fmt.Errorf("failed to detect TimescaleDB: %w", err)
206 | 	}
207 | 
208 | 	if !tsdbContext.IsTimescaleDB {
209 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
210 | 	}
211 | 
212 | 	// Define continuous aggregate function completions
213 | 	completions := []CompletionItem{
214 | 		{
215 | 			Name:             "create_materialized_view",
216 | 			Type:             "function",
217 | 			Documentation:    "TimescaleDB function that creates a continuous aggregate view.",
218 | 			InsertText:       "CREATE MATERIALIZED VIEW $1 WITH (timescaledb.continuous) AS SELECT $2 FROM $3 GROUP BY $4;",
219 | 			Category:         "TimescaleDB",
220 | 			CommitCharacters: []string{"("},
221 | 		},
222 | 		{
223 | 			Name:             "add_continuous_aggregate_policy",
224 | 			Type:             "function",
225 | 			Documentation:    "TimescaleDB function that adds a refresh policy to a continuous aggregate.",
226 | 			InsertText:       "add_continuous_aggregate_policy('$1', start_offset => INTERVAL '$2', end_offset => INTERVAL '$3', schedule_interval => INTERVAL '$4')",
227 | 			Parameters:       []string{"continuous_aggregate", "start_offset", "end_offset", "schedule_interval"},
228 | 			ReturnType:       "integer",
229 | 			Category:         "TimescaleDB",
230 | 			CommitCharacters: []string{"("},
231 | 		},
232 | 		{
233 | 			Name:             "refresh_continuous_aggregate",
234 | 			Type:             "function",
235 | 			Documentation:    "TimescaleDB function that manually refreshes a continuous aggregate for a specific time range.",
236 | 			InsertText:       "refresh_continuous_aggregate('$1', '$2', '$3')",
237 | 			Parameters:       []string{"continuous_aggregate", "start_time", "end_time"},
238 | 			ReturnType:       "void",
239 | 			Category:         "TimescaleDB",
240 | 			CommitCharacters: []string{"("},
241 | 		},
242 | 	}
243 | 
244 | 	// Add version information to documentation
245 | 	for i := range completions {
246 | 		completions[i].Documentation = fmt.Sprintf("TimescaleDB v%s: %s", tsdbContext.Version, completions[i].Documentation)
247 | 	}
248 | 
249 | 	return completions, nil
250 | }
251 | 
252 | // GetAnalyticsFunctionCompletions returns completions for TimescaleDB's analytics functions
253 | func (p *TimescaleDBCompletionProvider) GetAnalyticsFunctionCompletions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]CompletionItem, error) {
254 | 	// First check if TimescaleDB is available
255 | 	tsdbContext, err := p.contextProvider.DetectTimescaleDB(ctx, dbID, useCase)
256 | 	if err != nil {
257 | 		return nil, fmt.Errorf("failed to detect TimescaleDB: %w", err)
258 | 	}
259 | 
260 | 	if !tsdbContext.IsTimescaleDB {
261 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
262 | 	}
263 | 
264 | 	// Define analytics function completions
265 | 	completions := []CompletionItem{
266 | 		{
267 | 			Name:             "first",
268 | 			Type:             "function",
269 | 			Documentation:    "TimescaleDB function that returns the value of the specified column at the first time ordered by time within each group.",
270 | 			InsertText:       "first($1, $2)",
271 | 			Parameters:       []string{"value", "time"},
272 | 			ReturnType:       "same as value",
273 | 			Category:         "TimescaleDB",
274 | 			CommitCharacters: []string{"("},
275 | 		},
276 | 		{
277 | 			Name:             "last",
278 | 			Type:             "function",
279 | 			Documentation:    "TimescaleDB function that returns the value of the specified column at the last time ordered by time within each group.",
280 | 			InsertText:       "last($1, $2)",
281 | 			Parameters:       []string{"value", "time"},
282 | 			ReturnType:       "same as value",
283 | 			Category:         "TimescaleDB",
284 | 			CommitCharacters: []string{"("},
285 | 		},
286 | 		{
287 | 			Name:             "time_weight",
288 | 			Type:             "function",
289 | 			Documentation:    "TimescaleDB function that returns the time-weighted average of a value over time.",
290 | 			InsertText:       "time_weight($1, $2)",
291 | 			Parameters:       []string{"value", "time"},
292 | 			ReturnType:       "double precision",
293 | 			Category:         "TimescaleDB",
294 | 			CommitCharacters: []string{"("},
295 | 		},
296 | 		{
297 | 			Name:             "histogram",
298 | 			Type:             "function",
299 | 			Documentation:    "TimescaleDB function that buckets values and returns a histogram showing the distribution.",
300 | 			InsertText:       "histogram($1, $2, $3, $4)",
301 | 			Parameters:       []string{"value", "min", "max", "num_buckets"},
302 | 			ReturnType:       "histogram",
303 | 			Category:         "TimescaleDB",
304 | 			CommitCharacters: []string{"("},
305 | 		},
306 | 		{
307 | 			Name:             "approx_percentile",
308 | 			Type:             "function",
309 | 			Documentation:    "TimescaleDB function that calculates approximate percentiles using the t-digest method.",
310 | 			InsertText:       "approx_percentile($1, $2)",
311 | 			Parameters:       []string{"value", "percentile"},
312 | 			ReturnType:       "double precision",
313 | 			Category:         "TimescaleDB",
314 | 			CommitCharacters: []string{"("},
315 | 		},
316 | 	}
317 | 
318 | 	// Add version information to documentation
319 | 	for i := range completions {
320 | 		completions[i].Documentation = fmt.Sprintf("TimescaleDB v%s: %s", tsdbContext.Version, completions[i].Documentation)
321 | 	}
322 | 
323 | 	return completions, nil
324 | }
325 | 
326 | // GetAllFunctionCompletions returns completions for all TimescaleDB functions
327 | func (p *TimescaleDBCompletionProvider) GetAllFunctionCompletions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]CompletionItem, error) {
328 | 	// Check if TimescaleDB is available by using the DetectTimescaleDB method
329 | 	// which already checks the database type
330 | 	tsdbContext, err := p.contextProvider.DetectTimescaleDB(ctx, dbID, useCase)
331 | 	if err != nil {
332 | 		return nil, fmt.Errorf("failed to detect TimescaleDB: %w", err)
333 | 	}
334 | 
335 | 	if !tsdbContext.IsTimescaleDB {
336 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
337 | 	}
338 | 
339 | 	// Define predefined completions for all categories
340 | 	var allCompletions []CompletionItem
341 | 
342 | 	// Time bucket functions
343 | 	allCompletions = append(allCompletions, []CompletionItem{
344 | 		{
345 | 			Name:             "time_bucket",
346 | 			Type:             "function",
347 | 			Documentation:    "TimescaleDB function that groups time into buckets. Useful for downsampling time-series data.",
348 | 			InsertText:       "time_bucket($1, $2)",
349 | 			Parameters:       []string{"interval", "timestamp"},
350 | 			ReturnType:       "timestamp",
351 | 			Category:         "TimescaleDB",
352 | 			CommitCharacters: []string{"("},
353 | 		},
354 | 		{
355 | 			Name:             "time_bucket_gapfill",
356 | 			Type:             "function",
357 | 			Documentation:    "TimescaleDB function similar to time_bucket but fills in missing values (gaps) in the result.",
358 | 			InsertText:       "time_bucket_gapfill($1, $2)",
359 | 			Parameters:       []string{"interval", "timestamp"},
360 | 			ReturnType:       "timestamp",
361 | 			Category:         "TimescaleDB",
362 | 			CommitCharacters: []string{"("},
363 | 		},
364 | 		{
365 | 			Name:             "time_bucket_ng",
366 | 			Type:             "function",
367 | 			Documentation:    "TimescaleDB next-generation time bucket function that supports timezone-aware bucketing.",
368 | 			InsertText:       "time_bucket_ng('$1', $2)",
369 | 			Parameters:       []string{"interval", "timestamp", "timezone"},
370 | 			ReturnType:       "timestamp with time zone",
371 | 			Category:         "TimescaleDB",
372 | 			CommitCharacters: []string{"("},
373 | 		},
374 | 	}...)
375 | 
376 | 	// Hypertable functions
377 | 	allCompletions = append(allCompletions, []CompletionItem{
378 | 		{
379 | 			Name:             "create_hypertable",
380 | 			Type:             "function",
381 | 			Documentation:    "TimescaleDB function that converts a standard PostgreSQL table into a hypertable partitioned by time.",
382 | 			InsertText:       "create_hypertable('$1', '$2')",
383 | 			Parameters:       []string{"table_name", "time_column_name"},
384 | 			ReturnType:       "void",
385 | 			Category:         "TimescaleDB",
386 | 			CommitCharacters: []string{"("},
387 | 		},
388 | 		{
389 | 			Name:             "add_dimension",
390 | 			Type:             "function",
391 | 			Documentation:    "TimescaleDB function that adds another dimension to a hypertable for partitioning.",
392 | 			InsertText:       "add_dimension('$1', '$2')",
393 | 			Parameters:       []string{"hypertable", "column_name"},
394 | 			ReturnType:       "void",
395 | 			Category:         "TimescaleDB",
396 | 			CommitCharacters: []string{"("},
397 | 		},
398 | 		{
399 | 			Name:             "add_compression_policy",
400 | 			Type:             "function",
401 | 			Documentation:    "TimescaleDB function that adds an automatic compression policy to a hypertable.",
402 | 			InsertText:       "add_compression_policy('$1', INTERVAL '$2')",
403 | 			Parameters:       []string{"hypertable", "older_than"},
404 | 			ReturnType:       "integer",
405 | 			Category:         "TimescaleDB",
406 | 			CommitCharacters: []string{"("},
407 | 		},
408 | 		{
409 | 			Name:             "add_retention_policy",
410 | 			Type:             "function",
411 | 			Documentation:    "TimescaleDB function that adds an automatic data retention policy to a hypertable.",
412 | 			InsertText:       "add_retention_policy('$1', INTERVAL '$2')",
413 | 			Parameters:       []string{"hypertable", "drop_after"},
414 | 			ReturnType:       "integer",
415 | 			Category:         "TimescaleDB",
416 | 			CommitCharacters: []string{"("},
417 | 		},
418 | 	}...)
419 | 
420 | 	// Continuous aggregate functions
421 | 	allCompletions = append(allCompletions, []CompletionItem{
422 | 		{
423 | 			Name:             "create_materialized_view",
424 | 			Type:             "function",
425 | 			Documentation:    "TimescaleDB function that creates a continuous aggregate view.",
426 | 			InsertText:       "CREATE MATERIALIZED VIEW $1 WITH (timescaledb.continuous) AS SELECT $2 FROM $3 GROUP BY $4;",
427 | 			Category:         "TimescaleDB",
428 | 			CommitCharacters: []string{"("},
429 | 		},
430 | 		{
431 | 			Name:             "add_continuous_aggregate_policy",
432 | 			Type:             "function",
433 | 			Documentation:    "TimescaleDB function that adds a refresh policy to a continuous aggregate.",
434 | 			InsertText:       "add_continuous_aggregate_policy('$1', start_offset => INTERVAL '$2', end_offset => INTERVAL '$3', schedule_interval => INTERVAL '$4')",
435 | 			Parameters:       []string{"continuous_aggregate", "start_offset", "end_offset", "schedule_interval"},
436 | 			ReturnType:       "integer",
437 | 			Category:         "TimescaleDB",
438 | 			CommitCharacters: []string{"("},
439 | 		},
440 | 	}...)
441 | 
442 | 	// Analytics functions
443 | 	allCompletions = append(allCompletions, []CompletionItem{
444 | 		{
445 | 			Name:             "first",
446 | 			Type:             "function",
447 | 			Documentation:    "TimescaleDB function that returns the value of the specified column at the first time ordered by time within each group.",
448 | 			InsertText:       "first($1, $2)",
449 | 			Parameters:       []string{"value", "time"},
450 | 			ReturnType:       "same as value",
451 | 			Category:         "TimescaleDB",
452 | 			CommitCharacters: []string{"("},
453 | 		},
454 | 		{
455 | 			Name:             "last",
456 | 			Type:             "function",
457 | 			Documentation:    "TimescaleDB function that returns the value of the specified column at the last time ordered by time within each group.",
458 | 			InsertText:       "last($1, $2)",
459 | 			Parameters:       []string{"value", "time"},
460 | 			ReturnType:       "same as value",
461 | 			Category:         "TimescaleDB",
462 | 			CommitCharacters: []string{"("},
463 | 		},
464 | 		{
465 | 			Name:             "time_weight",
466 | 			Type:             "function",
467 | 			Documentation:    "TimescaleDB function that returns the time-weighted average of a value over time.",
468 | 			InsertText:       "time_weight($1, $2)",
469 | 			Parameters:       []string{"value", "time"},
470 | 			ReturnType:       "double precision",
471 | 			Category:         "TimescaleDB",
472 | 			CommitCharacters: []string{"("},
473 | 		},
474 | 	}...)
475 | 
476 | 	// Add version information to documentation
477 | 	for i := range allCompletions {
478 | 		allCompletions[i].Documentation = fmt.Sprintf("TimescaleDB v%s: %s", tsdbContext.Version, allCompletions[i].Documentation)
479 | 	}
480 | 
481 | 	return allCompletions, nil
482 | }
483 | 
484 | // GetQuerySuggestions returns TimescaleDB query suggestions based on the database schema
485 | func (p *TimescaleDBCompletionProvider) GetQuerySuggestions(ctx context.Context, dbID string, useCase UseCaseProvider) ([]QuerySuggestion, error) {
486 | 	// First check if TimescaleDB is available
487 | 	tsdbContext, err := p.contextProvider.GetTimescaleDBContext(ctx, dbID, useCase)
488 | 	if err != nil {
489 | 		return nil, fmt.Errorf("failed to get TimescaleDB context: %w", err)
490 | 	}
491 | 
492 | 	if !tsdbContext.IsTimescaleDB {
493 | 		return nil, fmt.Errorf("TimescaleDB is not available in the database %s", dbID)
494 | 	}
495 | 
496 | 	// Base suggestions that don't depend on schema
497 | 	suggestions := []QuerySuggestion{
498 | 		{
499 | 			Title:       "Basic Time Bucket Aggregation",
500 | 			Description: "Groups time-series data into time buckets and calculates aggregates",
501 | 			Query:       "SELECT time_bucket('1 hour', time_column) AS bucket, avg(value_column), min(value_column), max(value_column) FROM table_name WHERE time_column > now() - INTERVAL '1 day' GROUP BY bucket ORDER BY bucket;",
502 | 			Category:    "Time Buckets",
503 | 		},
504 | 		{
505 | 			Title:       "Time Bucket with Gap Filling",
506 | 			Description: "Groups time-series data with gap filling for missing values",
507 | 			Query:       "SELECT time_bucket_gapfill('1 hour', time_column) AS bucket, avg(value_column), min(value_column), max(value_column) FROM table_name WHERE time_column > now() - INTERVAL '1 day' AND time_column <= now() GROUP BY bucket ORDER BY bucket;",
508 | 			Category:    "Time Buckets",
509 | 		},
510 | 		{
511 | 			Title:       "Create Hypertable",
512 | 			Description: "Converts a standard PostgreSQL table into a TimescaleDB hypertable",
513 | 			Query:       "SELECT create_hypertable('table_name', 'time_column');",
514 | 			Category:    "Hypertable Management",
515 | 		},
516 | 		{
517 | 			Title:       "Add Compression Policy",
518 | 			Description: "Adds an automatic compression policy to a hypertable",
519 | 			Query:       "SELECT add_compression_policy('table_name', INTERVAL '7 days');",
520 | 			Category:    "Hypertable Management",
521 | 		},
522 | 		{
523 | 			Title:       "Add Retention Policy",
524 | 			Description: "Adds an automatic data retention policy to a hypertable",
525 | 			Query:       "SELECT add_retention_policy('table_name', INTERVAL '30 days');",
526 | 			Category:    "Hypertable Management",
527 | 		},
528 | 		{
529 | 			Title:       "Create Continuous Aggregate",
530 | 			Description: "Creates a materialized view that automatically maintains aggregated data",
531 | 			Query:       "CREATE MATERIALIZED VIEW view_name WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', time_column) as bucket, avg(value_column) FROM table_name GROUP BY bucket;",
532 | 			Category:    "Continuous Aggregates",
533 | 		},
534 | 		{
535 | 			Title:       "Add Continuous Aggregate Policy",
536 | 			Description: "Adds a refresh policy to a continuous aggregate",
537 | 			Query:       "SELECT add_continuous_aggregate_policy('view_name', start_offset => INTERVAL '2 days', end_offset => INTERVAL '1 hour', schedule_interval => INTERVAL '1 hour');",
538 | 			Category:    "Continuous Aggregates",
539 | 		},
540 | 		{
541 | 			Title:       "Hypertable Size",
542 | 			Description: "Shows the size of a hypertable including all chunks",
543 | 			Query:       "SELECT * FROM hypertable_size('table_name');",
544 | 			Category:    "Diagnostics",
545 | 		},
546 | 		{
547 | 			Title:       "Hypertable Detailed Size",
548 | 			Description: "Shows detailed size information for a hypertable",
549 | 			Query:       "SELECT * FROM hypertable_detailed_size('table_name');",
550 | 			Category:    "Diagnostics",
551 | 		},
552 | 		{
553 | 			Title:       "Compression Stats",
554 | 			Description: "Shows compression statistics for a hypertable",
555 | 			Query:       "SELECT * FROM hypertable_compression_stats('table_name');",
556 | 			Category:    "Diagnostics",
557 | 		},
558 | 		{
559 | 			Title:       "Job Stats",
560 | 			Description: "Shows statistics for background jobs like compression and retention policies",
561 | 			Query:       "SELECT * FROM timescaledb_information.jobs;",
562 | 			Category:    "Diagnostics",
563 | 		},
564 | 	}
565 | 
566 | 	// If we have hypertable information, use it to create tailored suggestions
567 | 	if len(tsdbContext.Hypertables) > 0 {
568 | 		for _, ht := range tsdbContext.Hypertables {
569 | 			tableName := ht.TableName
570 | 			timeColumn := ht.TimeColumn
571 | 
572 | 			// Skip if we don't have both table name and time column
573 | 			if tableName == "" || timeColumn == "" {
574 | 				continue
575 | 			}
576 | 
577 | 			// Add schema-specific suggestions
578 | 			suggestions = append(suggestions, []QuerySuggestion{
579 | 				{
580 | 					Title:       fmt.Sprintf("Time Bucket Aggregation for %s", tableName),
581 | 					Description: fmt.Sprintf("Groups data from %s table into time buckets", tableName),
582 | 					Query:       fmt.Sprintf("SELECT time_bucket('1 hour', %s) AS bucket, avg(value_column) FROM %s WHERE %s > now() - INTERVAL '1 day' GROUP BY bucket ORDER BY bucket;", timeColumn, tableName, timeColumn),
583 | 					Category:    "Time Buckets",
584 | 				},
585 | 				{
586 | 					Title:       fmt.Sprintf("Compression Policy for %s", tableName),
587 | 					Description: fmt.Sprintf("Adds compression policy to %s hypertable", tableName),
588 | 					Query:       fmt.Sprintf("SELECT add_compression_policy('%s', INTERVAL '7 days');", tableName),
589 | 					Category:    "Hypertable Management",
590 | 				},
591 | 				{
592 | 					Title:       fmt.Sprintf("Retention Policy for %s", tableName),
593 | 					Description: fmt.Sprintf("Adds retention policy to %s hypertable", tableName),
594 | 					Query:       fmt.Sprintf("SELECT add_retention_policy('%s', INTERVAL '30 days');", tableName),
595 | 					Category:    "Hypertable Management",
596 | 				},
597 | 				{
598 | 					Title:       fmt.Sprintf("Continuous Aggregate for %s", tableName),
599 | 					Description: fmt.Sprintf("Creates a continuous aggregate view for %s", tableName),
600 | 					Query:       fmt.Sprintf("CREATE MATERIALIZED VIEW %s_hourly WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', %s) as bucket, avg(value_column) FROM %s GROUP BY bucket;", tableName, timeColumn, tableName),
601 | 					Category:    "Continuous Aggregates",
602 | 				},
603 | 				{
604 | 					Title:       fmt.Sprintf("Recent Data from %s", tableName),
605 | 					Description: fmt.Sprintf("Retrieves recent data from %s with time ordering", tableName),
606 | 					Query:       fmt.Sprintf("SELECT * FROM %s WHERE %s > now() - INTERVAL '1 day' ORDER BY %s DESC LIMIT 100;", tableName, timeColumn, timeColumn),
607 | 					Category:    "Data Retrieval",
608 | 				},
609 | 				{
610 | 					Title:       fmt.Sprintf("First/Last Analysis for %s", tableName),
611 | 					Description: fmt.Sprintf("Uses first/last functions to analyze %s by segments", tableName),
612 | 					Query:       fmt.Sprintf("SELECT segment_column, first(value_column, %s), last(value_column, %s) FROM %s GROUP BY segment_column;", timeColumn, timeColumn, tableName),
613 | 					Category:    "Analytics",
614 | 				},
615 | 			}...)
616 | 		}
617 | 	}
618 | 
619 | 	return suggestions, nil
620 | }
621 | 
```
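
A minimal usage sketch for the provider above, written as if it were an in-package helper (assumptions: `"postgres1"` is a placeholder database ID, and `useCase` is whatever `UseCaseProvider` implementation your server wiring supplies):

```go
// printTimeBucketCompletions sketches the intended call pattern; it is not part
// of the repository. It lists time_bucket completions for one database.
func printTimeBucketCompletions(ctx context.Context, useCase UseCaseProvider) error {
	provider := NewTimescaleDBCompletionProvider()

	// Fails with an error if the target database is not TimescaleDB.
	items, err := provider.GetTimeBucketCompletions(ctx, "postgres1", useCase)
	if err != nil {
		return fmt.Errorf("completions unavailable: %w", err)
	}
	for _, item := range items {
		fmt.Printf("%s -> %s\n", item.Name, item.InsertText)
	}
	return nil
}
```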

--------------------------------------------------------------------------------
/pkg/dbtools/dbtools.go:
--------------------------------------------------------------------------------

```go
   1 | package dbtools
   2 | 
   3 | import (
   4 | 	"context"
   5 | 	"database/sql"
   6 | 	"encoding/json"
   7 | 	"fmt"
   8 | 	"os"
   9 | 	"path/filepath"
  10 | 	"strconv"
  11 | 	"strings"
  12 | 	"time"
  13 | 
  14 | 	"github.com/FreePeak/db-mcp-server/pkg/db"
  15 | 	"github.com/FreePeak/db-mcp-server/pkg/logger"
  16 | 	"github.com/FreePeak/db-mcp-server/pkg/tools"
  17 | )
  18 | 
  19 | // TODO: Refactor database connection management to support connection pooling
  20 | // TODO: Add support for connection retries and circuit breaking
  21 | // TODO: Implement comprehensive metrics collection for database operations
  22 | // TODO: Consider using a context-aware connection management system
  23 | // TODO: Add support for database migrations and versioning
  24 | // TODO: Improve error handling with custom error types
  25 | 
  26 | // DatabaseType represents a supported database type
  27 | type DatabaseType string
  28 | 
  29 | const (
  30 | 	// MySQL database type
  31 | 	MySQL DatabaseType = "mysql"
  32 | 	// Postgres database type
  33 | 	Postgres DatabaseType = "postgres"
  34 | )
  35 | 
  36 | // Config represents database configuration
  37 | type Config struct {
  38 | 	ConfigFile  string
  39 | 	Connections []ConnectionConfig
  40 | }
  41 | 
  42 | // ConnectionConfig represents a single database connection configuration
  43 | type ConnectionConfig struct {
  44 | 	ID       string       `json:"id"`
  45 | 	Type     DatabaseType `json:"type"`
  46 | 	Host     string       `json:"host"`
  47 | 	Port     int          `json:"port"`
  48 | 	Name     string       `json:"name"`
  49 | 	User     string       `json:"user"`
  50 | 	Password string       `json:"password"`
  51 | }
  52 | 
  53 | // MultiDBConfig represents configuration for multiple database connections
  54 | type MultiDBConfig struct {
  55 | 	Connections []ConnectionConfig `json:"connections"`
  56 | }
  57 | 
  58 | // Database connection manager (singleton)
  59 | var (
  60 | 	dbManager *db.Manager
  61 | )
  62 | 
  63 | // DatabaseConnectionInfo represents detailed information about a database connection
  64 | type DatabaseConnectionInfo struct {
  65 | 	ID      string       `json:"id"`
  66 | 	Type    DatabaseType `json:"type"`
  67 | 	Host    string       `json:"host"`
  68 | 	Port    int          `json:"port"`
  69 | 	Name    string       `json:"name"`
  70 | 	Status  string       `json:"status"`
  71 | 	Latency string       `json:"latency,omitempty"`
  72 | }
  73 | 
  74 | // InitDatabase initializes the database connections
  75 | func InitDatabase(cfg *Config) error {
  76 | 	// Create database manager
  77 | 	dbManager = db.NewDBManager()
  78 | 
  79 | 	var multiDBConfig *MultiDBConfig
  80 | 
  81 | 	// If config file is provided, load it
  82 | 	if cfg != nil && cfg.ConfigFile != "" {
  83 | 		// Read config file
  84 | 		configData, err := os.ReadFile(cfg.ConfigFile)
  85 | 		if err != nil {
  86 | 			logger.Warn("Warning: failed to read config file %s: %v", cfg.ConfigFile, err)
  87 | 			// Don't return error, try other methods
  88 | 		} else {
  89 | 			// Parse config
  90 | 			multiDBConfig = &MultiDBConfig{}
  91 | 			if err := json.Unmarshal(configData, multiDBConfig); err != nil {
  92 | 				logger.Warn("Warning: failed to parse config file %s: %v", cfg.ConfigFile, err)
  93 | 				// Don't return error, try other methods
  94 | 			} else {
  95 | 				logger.Info("Loaded database config from file: %s", cfg.ConfigFile)
  96 | 				// Debug logging of connection details
  97 | 				for i, conn := range multiDBConfig.Connections {
  98 | 					logger.Info("Connection [%d]: ID=%s, Type=%s, Host=%s, Port=%d, Name=%s",
  99 | 						i, conn.ID, conn.Type, conn.Host, conn.Port, conn.Name)
 100 | 				}
 101 | 			}
 102 | 		}
 103 | 	}
 104 | 
 105 | 	// If config was not loaded from file, try direct connections config
 106 | 	if multiDBConfig == nil || len(multiDBConfig.Connections) == 0 {
 107 | 		if cfg != nil && len(cfg.Connections) > 0 {
 108 | 			// Use connections from direct config
 109 | 			multiDBConfig = &MultiDBConfig{
 110 | 				Connections: cfg.Connections,
 111 | 			}
 112 | 			logger.Info("Using database connections from direct configuration")
 113 | 		} else {
 114 | 			// Try to load from environment variable
 115 | 			dbConfigJSON := os.Getenv("DB_CONFIG")
 116 | 			if dbConfigJSON != "" {
 117 | 				multiDBConfig = &MultiDBConfig{}
 118 | 				if err := json.Unmarshal([]byte(dbConfigJSON), multiDBConfig); err != nil {
 119 | 					logger.Warn("Warning: failed to parse DB_CONFIG environment variable: %v", err)
 120 | 					// Don't return error, try legacy method
 121 | 				} else {
 122 | 					logger.Info("Loaded database config from DB_CONFIG environment variable")
 123 | 				}
 124 | 			}
 125 | 		}
 126 | 	}
 127 | 
 128 | 	// If no config loaded yet, try legacy single connection from environment
 129 | 	if multiDBConfig == nil || len(multiDBConfig.Connections) == 0 {
 130 | 		// Create a single connection from environment variables
 131 | 		dbType := os.Getenv("DB_TYPE")
 132 | 		if dbType == "" {
 133 | 			dbType = "mysql" // Default type
 134 | 		}
 135 | 
 136 | 		dbHost := os.Getenv("DB_HOST")
 137 | 		dbPortStr := os.Getenv("DB_PORT")
 138 | 		dbUser := os.Getenv("DB_USER")
 139 | 		dbPassword := os.Getenv("DB_PASSWORD")
 140 | 		dbName := os.Getenv("DB_NAME")
 141 | 
 142 | 		// If we have basic connection details, create a config
 143 | 		if dbHost != "" && dbUser != "" {
 144 | 			dbPort, err := strconv.Atoi(dbPortStr)
 145 | 			if err != nil || dbPort == 0 {
 146 | 				dbPort = 3306 // Default MySQL port
 147 | 			}
 148 | 
 149 | 			multiDBConfig = &MultiDBConfig{
 150 | 				Connections: []ConnectionConfig{
 151 | 					{
 152 | 						ID:       "default",
 153 | 						Type:     DatabaseType(dbType),
 154 | 						Host:     dbHost,
 155 | 						Port:     dbPort,
 156 | 						Name:     dbName,
 157 | 						User:     dbUser,
 158 | 						Password: dbPassword,
 159 | 					},
 160 | 				},
 161 | 			}
 162 | 			logger.Info("Created database config from environment variables")
 163 | 		}
 164 | 	}
 165 | 
 166 | 	// If still no config, return error
 167 | 	if multiDBConfig == nil || len(multiDBConfig.Connections) == 0 {
 168 | 		return fmt.Errorf("no database configuration provided")
 169 | 	}
 170 | 
 171 | 	// Convert config to JSON for loading
 172 | 	configJSON, err := json.Marshal(multiDBConfig)
 173 | 	if err != nil {
 174 | 		return fmt.Errorf("failed to marshal database config: %w", err)
 175 | 	}
 176 | 
 177 | 	if err := dbManager.LoadConfig(configJSON); err != nil {
 178 | 		return fmt.Errorf("failed to load database config: %w", err)
 179 | 	}
 180 | 
 181 | 	// Connect to all databases
 182 | 	if err := dbManager.Connect(); err != nil {
 183 | 		return fmt.Errorf("failed to connect to databases: %w", err)
 184 | 	}
 185 | 
 186 | 	// Log connected databases
 187 | 	dbs := dbManager.ListDatabases()
 188 | 	logger.Info("Connected to %d databases: %v", len(dbs), dbs)
 189 | 
 190 | 	return nil
 191 | }
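// Example configuration payload accepted above, whether from a config file or
// the DB_CONFIG environment variable (a sketch: IDs and credentials are
// placeholders; field names follow ConnectionConfig's json tags):
//
//	{
//	  "connections": [
//	    {"id": "mysql1", "type": "mysql", "host": "localhost", "port": 3306,
//	     "name": "mydb", "user": "root", "password": "secret"}
//	  ]
//	}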
 192 | 
 193 | // CloseDatabase closes all database connections
 194 | func CloseDatabase() error {
 195 | 	if dbManager == nil {
 196 | 		return nil
 197 | 	}
 198 | 	return dbManager.CloseAll()
 199 | }
 200 | 
 201 | // GetDatabase returns a database instance by ID
 202 | func GetDatabase(id string) (db.Database, error) {
 203 | 	if dbManager == nil {
 204 | 		return nil, fmt.Errorf("database manager not initialized")
 205 | 	}
 206 | 	return dbManager.GetDatabase(id)
 207 | }
 208 | 
 209 | // ListDatabases returns a list of available database connections
 210 | func ListDatabases() []string {
 211 | 	if dbManager == nil {
 212 | 		return nil
 213 | 	}
 214 | 	return dbManager.ListDatabases()
 215 | }
 216 | 
 217 | // showConnectedDatabases returns information about all connected databases
 218 | func showConnectedDatabases(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 219 | 	if dbManager == nil {
 220 | 		return nil, fmt.Errorf("database manager not initialized")
 221 | 	}
 222 | 
 223 | 	var connections []DatabaseConnectionInfo
 224 | 	dbIDs := ListDatabases()
 225 | 
 226 | 	for _, dbID := range dbIDs {
 227 | 		database, err := GetDatabase(dbID)
 228 | 		if err != nil {
 229 | 			continue
 230 | 		}
 231 | 
 232 | 		// Get connection details
 233 | 		connInfo := DatabaseConnectionInfo{
 234 | 			ID: dbID,
 235 | 		}
 236 | 
 237 | 		// Check connection status and measure latency
 238 | 		start := time.Now()
 239 | 		err = database.Ping(ctx)
 240 | 		latency := time.Since(start)
 241 | 
 242 | 		if err != nil {
 243 | 			connInfo.Status = "disconnected"
 244 | 			connInfo.Latency = "n/a"
 245 | 		} else {
 246 | 			connInfo.Status = "connected"
 247 | 			connInfo.Latency = latency.String()
 248 | 		}
 249 | 
 250 | 		connections = append(connections, connInfo)
 251 | 	}
 252 | 
 253 | 	return connections, nil
 254 | }
 255 | 
 256 | // RegisterDatabaseTools registers all database tools with the provided registry
 257 | func RegisterDatabaseTools(registry *tools.Registry) error {
 258 | 	// Register schema explorer tool
 259 | 	registry.RegisterTool(&tools.Tool{
 260 | 		Name:        "dbSchema",
 261 | 		Description: "Auto-discover database structure and relationships",
 262 | 		InputSchema: tools.ToolInputSchema{
 263 | 			Type: "object",
 264 | 			Properties: map[string]interface{}{
 265 | 				"database": map[string]interface{}{
 266 | 					"type":        "string",
 267 | 					"description": "Database name to explore (optional, leave empty for all databases)",
 268 | 				},
 269 | 				"component": map[string]interface{}{
 270 | 					"type":        "string",
 271 | 					"description": "Component to explore (tables, columns, indices, or all)",
 272 | 					"enum":        []string{"tables", "columns", "indices", "all"},
 273 | 				},
 274 | 				"table": map[string]interface{}{
 275 | 					"type":        "string",
 276 | 					"description": "Specific table to explore (optional)",
 277 | 				},
 278 | 			},
 279 | 		},
 280 | 		Handler: handleSchemaExplorer,
 281 | 	})
 282 | 
 283 | 	// Register query tool
 284 | 	registry.RegisterTool(&tools.Tool{
 285 | 		Name:        "dbQuery",
 286 | 		Description: "Execute SQL query and return results",
 287 | 		InputSchema: tools.ToolInputSchema{
 288 | 			Type: "object",
 289 | 			Properties: map[string]interface{}{
 290 | 				"query": map[string]interface{}{
 291 | 					"type":        "string",
 292 | 					"description": "SQL query to execute",
 293 | 				},
 294 | 				"database": map[string]interface{}{
 295 | 					"type":        "string",
 296 | 					"description": "Database ID to query (optional if only one database is configured)",
 297 | 				},
 298 | 				"params": map[string]interface{}{
 299 | 					"type":        "array",
 300 | 					"description": "Parameters for the query (for prepared statements)",
 301 | 					"items": map[string]interface{}{
 302 | 						"type": "string",
 303 | 					},
 304 | 				},
 305 | 				"timeout": map[string]interface{}{
 306 | 					"type":        "integer",
 307 | 					"description": "Query timeout in milliseconds (default: 5000)",
 308 | 				},
 309 | 			},
 310 | 			Required: []string{"query"},
 311 | 		},
 312 | 		Handler: handleQuery,
 313 | 	})
 314 | 
 315 | 	// Register execute tool
 316 | 	registry.RegisterTool(&tools.Tool{
 317 | 		Name:        "dbExecute",
 318 | 		Description: "Execute a database statement that doesn't return results (INSERT, UPDATE, DELETE, etc.)",
 319 | 		InputSchema: tools.ToolInputSchema{
 320 | 			Type: "object",
 321 | 			Properties: map[string]interface{}{
 322 | 				"statement": map[string]interface{}{
 323 | 					"type":        "string",
 324 | 					"description": "SQL statement to execute",
 325 | 				},
 326 | 				"database": map[string]interface{}{
 327 | 					"type":        "string",
 328 | 					"description": "Database ID to query (optional if only one database is configured)",
 329 | 				},
 330 | 				"params": map[string]interface{}{
 331 | 					"type":        "array",
 332 | 					"description": "Parameters for the statement (for prepared statements)",
 333 | 					"items": map[string]interface{}{
 334 | 						"type": "string",
 335 | 					},
 336 | 				},
 337 | 				"timeout": map[string]interface{}{
 338 | 					"type":        "integer",
 339 | 					"description": "Statement timeout in milliseconds (default: 5000)",
 340 | 				},
 341 | 			},
 342 | 			Required: []string{"statement"},
 343 | 		},
 344 | 		Handler: handleExecute,
 345 | 	})
 346 | 
 347 | 	// Register list databases tool
 348 | 	registry.RegisterTool(&tools.Tool{
 349 | 		Name:        "dbList",
 350 | 		Description: "List all available database connections",
 351 | 		InputSchema: tools.ToolInputSchema{
 352 | 			Type: "object",
 353 | 			Properties: map[string]interface{}{
 354 | 				"showStatus": map[string]interface{}{
 355 | 					"type":        "boolean",
 356 | 					"description": "Show connection status and latency",
 357 | 				},
 358 | 			},
 359 | 		},
 360 | 		Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 361 | 			// Show connection status?
 362 | 			showStatus, ok := params["showStatus"].(bool)
 363 | 			if ok && showStatus {
 364 | 				return showConnectedDatabases(ctx, params)
 365 | 			}
 366 | 
 367 | 			// Just list database IDs
 368 | 			return ListDatabases(), nil
 369 | 		},
 370 | 	})
 371 | 
 372 | 	// Register query builder tool
 373 | 	registry.RegisterTool(&tools.Tool{
 374 | 		Name:        "dbQueryBuilder",
 375 | 		Description: "Build SQL queries visually",
 376 | 		InputSchema: tools.ToolInputSchema{
 377 | 			Type: "object",
 378 | 			Properties: map[string]interface{}{
 379 | 				"action": map[string]interface{}{
 380 | 					"type":        "string",
 381 | 					"description": "Action to perform (build, validate, format)",
 382 | 					"enum":        []string{"build", "validate", "format"},
 383 | 				},
 384 | 				"query": map[string]interface{}{
 385 | 					"type":        "string",
 386 | 					"description": "SQL query to validate or format",
 387 | 				},
 388 | 				"database": map[string]interface{}{
 389 | 					"type":        "string",
 390 | 					"description": "Database ID to use for validation",
 391 | 				},
 392 | 				"components": map[string]interface{}{
 393 | 					"type":        "object",
 394 | 					"description": "Query components (for build action)",
 395 | 				},
 396 | 			},
 397 | 			Required: []string{"action"},
 398 | 		},
 399 | 		Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 400 | 			// Just a placeholder for now
 401 | 			actionVal, ok := params["action"].(string)
 402 | 			if !ok {
 403 | 				return nil, fmt.Errorf("missing or invalid 'action' parameter")
 404 | 			}
 405 | 			return fmt.Sprintf("Query builder %s action not implemented yet", actionVal), nil
 406 | 		},
 407 | 	})
 408 | 
 409 | 	// Register Cursor-compatible tool handlers
 410 | 	// TODO: Implement or import this function
 411 | 	// tools.RegisterCursorCompatibleToolHandlers(registry)
 412 | 
 413 | 	return nil
 414 | }
 415 | 
 416 | // PingDatabase pings a database to check the connection
 417 | func PingDatabase(db *sql.DB) error {
 418 | 	return db.Ping()
 419 | }
 420 | 
 421 | // rowsToMaps converts sql.Rows to a slice of maps
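// For example, a row (42, []byte("ok")) under columns ["id", "status"] becomes
// map[string]interface{}{"id": 42, "status": "ok"}; byte slices are converted
// to strings and SQL NULLs become nil.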
 422 | func rowsToMaps(rows *sql.Rows) ([]map[string]interface{}, error) {
 423 | 	// Get column names
 424 | 	columns, err := rows.Columns()
 425 | 	if err != nil {
 426 | 		return nil, err
 427 | 	}
 428 | 
 429 | 	// Make a slice for the values
 430 | 	values := make([]interface{}, len(columns))
 431 | 
 432 | 	// Create references for the values
 433 | 	valueRefs := make([]interface{}, len(columns))
 434 | 	for i := range columns {
 435 | 		valueRefs[i] = &values[i]
 436 | 	}
 437 | 
 438 | 	// Create the slice to store results
 439 | 	var results []map[string]interface{}
 440 | 
 441 | 	// Fetch rows
 442 | 	for rows.Next() {
 443 | 		// Scan the result into the pointers
 444 | 		err := rows.Scan(valueRefs...)
 445 | 		if err != nil {
 446 | 			return nil, err
 447 | 		}
 448 | 
 449 | 		// Create a map for this row
 450 | 		result := make(map[string]interface{})
 451 | 		for i, column := range columns {
 452 | 			val := values[i]
 453 | 
 454 | 			// Handle null values
 455 | 			if val == nil {
 456 | 				result[column] = nil
 457 | 				continue
 458 | 			}
 459 | 
 460 | 			// Convert bytes to string for easier JSON serialization
 461 | 			if b, ok := val.([]byte); ok {
 462 | 				result[column] = string(b)
 463 | 			} else {
 464 | 				result[column] = val
 465 | 			}
 466 | 		}
 467 | 
 468 | 		results = append(results, result)
 469 | 	}
 470 | 
 471 | 	if err := rows.Err(); err != nil {
 472 | 		return nil, err
 473 | 	}
 474 | 
 475 | 	return results, nil
 476 | }
 477 | 
 478 | // getStringParam safely extracts a string parameter from the params map
 479 | func getStringParam(params map[string]interface{}, key string) (string, bool) {
 480 | 	if val, ok := params[key].(string); ok {
 481 | 		return val, true
 482 | 	}
 483 | 	return "", false
 484 | }
 485 | 
 486 | // getIntParam safely extracts an int parameter from the params map
 487 | func getIntParam(params map[string]interface{}, key string) (int, bool) {
 488 | 	switch v := params[key].(type) {
 489 | 	case int:
 490 | 		return v, true
 491 | 	case float64:
 492 | 		return int(v), true
 493 | 	case int64:
 494 | 		return int(v), true
 495 | 	case json.Number:
 496 | 		if i, err := v.Int64(); err == nil {
 497 | 			return int(i), true
 498 | 		}
 499 | 	}
 500 | 	return 0, false
 501 | }
 502 | 
 503 | // getArrayParam safely extracts an array parameter from the params map
 504 | func getArrayParam(params map[string]interface{}, key string) ([]interface{}, bool) {
 505 | 	if val, ok := params[key].([]interface{}); ok {
 506 | 		return val, true
 507 | 	}
 508 | 	return nil, false
 509 | }
 510 | 
 511 | // _loadConfigFromFile loads database configuration from a file (currently unused)
 512 | func _loadConfigFromFile(cfg *Config) (*db.MultiDBConfig, error) {
 513 | 	if cfg.ConfigFile == "" {
 514 | 		return nil, fmt.Errorf("no config file specified")
 515 | 	}
 516 | 
 517 | 	// If path is not absolute, make it absolute
 518 | 	absPath := cfg.ConfigFile
 519 | 	if !filepath.IsAbs(absPath) {
 520 | 		var err error
 521 | 		absPath, err = filepath.Abs(absPath)
 522 | 		if err != nil {
 523 | 			return nil, fmt.Errorf("failed to resolve absolute path: %w", err)
 524 | 		}
 525 | 	}
 526 | 
 527 | 	// Read configuration file
 528 | 	configData, err := os.ReadFile(absPath)
 529 | 	if err != nil {
 530 | 		logger.Warn("Warning: failed to read config file %s: %v", cfg.ConfigFile, err)
 531 | 		return nil, err
 532 | 	}
 533 | 
 534 | 	// Parse JSON
 535 | 	var dbConfig db.MultiDBConfig
 536 | 	if err := json.Unmarshal(configData, &dbConfig); err != nil {
 537 | 		logger.Warn("Warning: failed to parse config file %s: %v", cfg.ConfigFile, err)
 538 | 		return nil, err
 539 | 	}
 540 | 	logger.Info("Loaded database config from file: %s", cfg.ConfigFile)
 541 | 	// Debug logging of connection details
 542 | 	for i, conn := range dbConfig.Connections {
 543 | 		logger.Info("Connection [%d]: ID=%s, Type=%s, Host=%s, Port=%d, Name=%s",
 544 | 			i, conn.ID, conn.Type, conn.Host, conn.Port, conn.Name)
 545 | 	}
 546 | 
 547 | 	return &dbConfig, nil
 548 | }
 549 | 
 550 | // _getEnv gets an environment variable or returns a default value (currently unused)
 551 | func _getEnv(key, defaultValue string) string {
 552 | 	value := os.Getenv(key)
 553 | 	if value == "" {
 554 | 		return defaultValue
 555 | 	}
 556 | 	return value
 557 | }
 558 | 
 559 | // _getIntEnv gets an environment variable as an integer or returns a default value (currently unused)
 560 | func _getIntEnv(key string, defaultValue int) int {
 561 | 	value := os.Getenv(key)
 562 | 	if value == "" {
 563 | 		return defaultValue
 564 | 	}
 565 | 	intValue, err := strconv.Atoi(value)
 566 | 	if err != nil {
 567 | 		return defaultValue
 568 | 	}
 569 | 	return intValue
 570 | }
 571 | 
 572 | // _loadConfigFromEnv loads database configuration from the environment (currently unused)
 573 | func _loadConfigFromEnv() (*db.MultiDBConfig, error) {
 574 | 	// Check if DB_CONFIG environment variable is set
 575 | 	dbConfigEnv := os.Getenv("DB_CONFIG")
 576 | 	if dbConfigEnv != "" {
 577 | 		var dbConfig db.MultiDBConfig
 578 | 		if err := json.Unmarshal([]byte(dbConfigEnv), &dbConfig); err != nil {
 579 | 			logger.Warn("Warning: failed to parse DB_CONFIG environment variable: %v", err)
 580 | 			return nil, err
 581 | 		}
 582 | 		logger.Info("Loaded database config from DB_CONFIG environment variable")
 583 | 		return &dbConfig, nil
 584 | 	}
 585 | 
 586 | 	// Create config from individual environment variables
 587 | 	// Load database configuration from environment variables
 588 | 	dbType := _getEnv("DB_TYPE", "mysql")
 589 | 	dbHost := _getEnv("DB_HOST", "localhost")
 590 | 	dbPort := _getIntEnv("DB_PORT", 3306)
 591 | 	dbUser := _getEnv("DB_USER", "")
 592 | 	dbPass := _getEnv("DB_PASSWORD", "")
 593 | 	dbName := _getEnv("DB_NAME", "")
 594 | 
 595 | 	// Create a default configuration with a single connection
 596 | 	dbConfig := &db.MultiDBConfig{
 597 | 		Connections: []db.DatabaseConnectionConfig{
 598 | 			{
 599 | 				ID:       "default",
 600 | 				Type:     dbType,
 601 | 				Host:     dbHost,
 602 | 				Port:     dbPort,
 603 | 				User:     dbUser,
 604 | 				Password: dbPass,
 605 | 				Name:     dbName,
 606 | 			},
 607 | 		},
 608 | 	}
 609 | 
 610 | 	logger.Info("Created database config from environment variables")
 611 | 	return dbConfig, nil
 612 | }
 613 | 
 614 | // GetDatabaseQueryTimeout returns the query timeout for a database in milliseconds
 615 | func GetDatabaseQueryTimeout(db db.Database) int {
 616 | 	// Get the query timeout from the database configuration
 617 | 	// Default to 30 seconds (30000ms) if not configured
 618 | 	defaultTimeout := 30000 // ms
 619 | 
 620 | 	if dbConfig, ok := db.(interface{ QueryTimeout() int }); ok {
 621 | 		if timeout := dbConfig.QueryTimeout(); timeout > 0 {
 622 | 			return timeout * 1000 // Convert from seconds to milliseconds
 623 | 		}
 624 | 	}
 625 | 
 626 | 	return defaultTimeout
 627 | }
 628 | 
 629 | // RegisterMCPDatabaseTools registers database tools specifically formatted for MCP compatibility
 630 | func RegisterMCPDatabaseTools(registry *tools.Registry) error {
 631 | 	// Get available databases
 632 | 	dbs := ListDatabases()
 633 | 
 634 | 	// If no databases are available, register mock tools
 635 | 	if len(dbs) == 0 {
 636 | 		return registerMCPMockTools(registry)
 637 | 	}
 638 | 
 639 | 	// Register MCP tools for each database
 640 | 	for _, dbID := range dbs {
		dbID := dbID // shadow the loop variable: on Go versions before 1.22 the handler closures below would otherwise all capture the final dbID
 641 | 		// Register query tool for this database
 642 | 		registry.RegisterTool(&tools.Tool{
 643 | 			Name:        fmt.Sprintf("query_%s", dbID),
 644 | 			Description: fmt.Sprintf("Execute SQL query on %s database", dbID),
 645 | 			InputSchema: tools.ToolInputSchema{
 646 | 				Type: "object",
 647 | 				Properties: map[string]interface{}{
 648 | 					"query": map[string]interface{}{
 649 | 						"type":        "string",
 650 | 						"description": "SQL query to execute",
 651 | 					},
 652 | 					"params": map[string]interface{}{
 653 | 						"type":        "array",
 654 | 						"description": "Query parameters",
 655 | 						"items": map[string]interface{}{
 656 | 							"type": "string",
 657 | 						},
 658 | 					},
 659 | 				},
 660 | 				Required: []string{"query"},
 661 | 			},
 662 | 			Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 663 | 				return handleQueryForDatabase(ctx, params, dbID)
 664 | 			},
 665 | 		})
 666 | 
 667 | 		// Register execute tool for this database
 668 | 		registry.RegisterTool(&tools.Tool{
 669 | 			Name:        fmt.Sprintf("execute_%s", dbID),
 670 | 			Description: fmt.Sprintf("Execute SQL statement on %s database", dbID),
 671 | 			InputSchema: tools.ToolInputSchema{
 672 | 				Type: "object",
 673 | 				Properties: map[string]interface{}{
 674 | 					"statement": map[string]interface{}{
 675 | 						"type":        "string",
 676 | 						"description": "SQL statement to execute",
 677 | 					},
 678 | 					"params": map[string]interface{}{
 679 | 						"type":        "array",
 680 | 						"description": "Statement parameters",
 681 | 						"items": map[string]interface{}{
 682 | 							"type": "string",
 683 | 						},
 684 | 					},
 685 | 				},
 686 | 				Required: []string{"statement"},
 687 | 			},
 688 | 			Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 689 | 				return handleExecuteForDatabase(ctx, params, dbID)
 690 | 			},
 691 | 		})
 692 | 
 693 | 		// Register transaction tool for this database
 694 | 		registry.RegisterTool(&tools.Tool{
 695 | 			Name:        fmt.Sprintf("transaction_%s", dbID),
 696 | 			Description: fmt.Sprintf("Manage transactions on %s database", dbID),
 697 | 			InputSchema: tools.ToolInputSchema{
 698 | 				Type: "object",
 699 | 				Properties: map[string]interface{}{
 700 | 					"action": map[string]interface{}{
 701 | 						"type":        "string",
 702 | 						"description": "Transaction action (begin, commit, rollback, execute)",
 703 | 						"enum":        []string{"begin", "commit", "rollback", "execute"},
 704 | 					},
 705 | 					"transactionId": map[string]interface{}{
 706 | 						"type":        "string",
 707 | 						"description": "Transaction ID (required for commit, rollback, execute)",
 708 | 					},
 709 | 					"statement": map[string]interface{}{
 710 | 						"type":        "string",
 711 | 						"description": "SQL statement to execute within transaction (required for execute)",
 712 | 					},
 713 | 					"params": map[string]interface{}{
 714 | 						"type":        "array",
 715 | 						"description": "Statement parameters",
 716 | 						"items": map[string]interface{}{
 717 | 							"type": "string",
 718 | 						},
 719 | 					},
 720 | 					"readOnly": map[string]interface{}{
 721 | 						"type":        "boolean",
 722 | 						"description": "Whether the transaction is read-only (for begin)",
 723 | 					},
 724 | 				},
 725 | 				Required: []string{"action"},
 726 | 			},
 727 | 			Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 728 | 				return handleTransactionForDatabase(ctx, params, dbID)
 729 | 			},
 730 | 		})
 731 | 
 732 | 		// Register performance tool for this database
 733 | 		registry.RegisterTool(&tools.Tool{
 734 | 			Name:        fmt.Sprintf("performance_%s", dbID),
 735 | 			Description: fmt.Sprintf("Analyze query performance on %s database", dbID),
 736 | 			InputSchema: tools.ToolInputSchema{
 737 | 				Type: "object",
 738 | 				Properties: map[string]interface{}{
 739 | 					"action": map[string]interface{}{
 740 | 						"type":        "string",
 741 | 						"description": "Action (getSlowQueries, getMetrics, analyzeQuery, reset, setThreshold)",
 742 | 						"enum":        []string{"getSlowQueries", "getMetrics", "analyzeQuery", "reset", "setThreshold"},
 743 | 					},
 744 | 					"query": map[string]interface{}{
 745 | 						"type":        "string",
 746 | 						"description": "SQL query to analyze (required for analyzeQuery)",
 747 | 					},
 748 | 					"threshold": map[string]interface{}{
 749 | 						"type":        "number",
 750 | 						"description": "Slow query threshold in milliseconds (required for setThreshold)",
 751 | 					},
 752 | 					"limit": map[string]interface{}{
 753 | 						"type":        "number",
 754 | 						"description": "Maximum number of results to return",
 755 | 					},
 756 | 				},
 757 | 				Required: []string{"action"},
 758 | 			},
 759 | 			Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 760 | 				return handlePerformanceForDatabase(ctx, params, dbID)
 761 | 			},
 762 | 		})
 763 | 
 764 | 		// Register schema tool for this database
 765 | 		registry.RegisterTool(&tools.Tool{
 766 | 			Name:        fmt.Sprintf("schema_%s", dbID),
 767 | 			Description: fmt.Sprintf("Get schema of %s database", dbID),
 768 | 			InputSchema: tools.ToolInputSchema{
 769 | 				Type: "object",
 770 | 				Properties: map[string]interface{}{
 771 | 					"random_string": map[string]interface{}{
 772 | 						"type":        "string",
 773 | 						"description": "Dummy parameter (optional)",
 774 | 					},
 775 | 				},
 776 | 			},
 777 | 			Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 778 | 				return handleSchemaForDatabase(ctx, params, dbID)
 779 | 			},
 780 | 		})
 781 | 	}
 782 | 
 783 | 	// Register list_databases tool
 784 | 	registry.RegisterTool(&tools.Tool{
 785 | 		Name:        "list_databases",
 786 | 		Description: "List all available databases",
 787 | 		InputSchema: tools.ToolInputSchema{
 788 | 			Type: "object",
 789 | 			Properties: map[string]interface{}{
 790 | 				"random_string": map[string]interface{}{
 791 | 					"type":        "string",
 792 | 					"description": "Dummy parameter (optional)",
 793 | 				},
 794 | 			},
 795 | 		},
 796 | 		Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 797 | 			dbs := ListDatabases()
 798 | 			output := "Available databases:\n\n"
 799 | 			for i, dbName := range dbs {
 800 | 				output += fmt.Sprintf("%d. %s\n", i+1, dbName)
 801 | 			}
 802 | 			if len(dbs) == 0 {
 803 | 				output += "No databases configured.\n"
 804 | 			}
 805 | 			return map[string]interface{}{
 806 | 				"content": []map[string]interface{}{
 807 | 					{"type": "text", "text": output},
 808 | 				},
 809 | 			}, nil
 810 | 		},
 811 | 	})
 812 | 
 813 | 	return nil
 814 | }
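     | // Illustrative result: with connections "db1" and "db2" configured, the loop
     | // above registers query_db1, execute_db1, transaction_db1, performance_db1 and
     | // schema_db1 (likewise for db2), plus the shared list_databases tool.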
 815 | 
 816 | // registerMCPMockTools registers placeholder tools so MCP clients still get a usable tool set when no databases are configured
 817 | func registerMCPMockTools(registry *tools.Registry) error {
 818 | 	mockDBID := "mock"
 819 | 
 820 | 	// Register mock query tool
 821 | 	registry.RegisterTool(&tools.Tool{
 822 | 		Name:        fmt.Sprintf("query_%s", mockDBID),
 823 | 		Description: fmt.Sprintf("Execute SQL query on %s database", mockDBID),
 824 | 		InputSchema: tools.ToolInputSchema{
 825 | 			Type: "object",
 826 | 			Properties: map[string]interface{}{
 827 | 				"query": map[string]interface{}{
 828 | 					"type":        "string",
 829 | 					"description": "SQL query to execute",
 830 | 				},
 831 | 				"params": map[string]interface{}{
 832 | 					"type":        "array",
 833 | 					"description": "Query parameters",
 834 | 					"items": map[string]interface{}{
 835 | 						"type": "string",
 836 | 					},
 837 | 				},
 838 | 			},
 839 | 			Required: []string{"query"},
 840 | 		},
 841 | 		Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 842 | 			query, _ := getStringParam(params, "query")
 843 | 			return map[string]interface{}{
 844 | 				"content": []map[string]interface{}{
 845 | 					{
 846 | 						"type": "text",
 847 | 						"text": fmt.Sprintf("Mock query executed:\n%s\n\nThis is a mock response.", query),
 848 | 					},
 849 | 				},
 850 | 				"mock": true,
 851 | 			}, nil
 852 | 		},
 853 | 	})
 854 | 
 855 | 	// Register list_databases tool
 856 | 	registry.RegisterTool(&tools.Tool{
 857 | 		Name:        "list_databases",
 858 | 		Description: "List all available databases",
 859 | 		InputSchema: tools.ToolInputSchema{
 860 | 			Type: "object",
 861 | 			Properties: map[string]interface{}{
 862 | 				"random_string": map[string]interface{}{
 863 | 					"type":        "string",
 864 | 					"description": "Dummy parameter (optional)",
 865 | 				},
 866 | 			},
 867 | 		},
 868 | 		Handler: func(ctx context.Context, params map[string]interface{}) (interface{}, error) {
 869 | 			return map[string]interface{}{
 870 | 				"content": []map[string]interface{}{
 871 | 					{
 872 | 						"type": "text",
 873 | 						"text": "Available databases:\n\n1. mock (not connected)\n",
 874 | 					},
 875 | 				},
 876 | 				"mock": true,
 877 | 			}, nil
 878 | 		},
 879 | 	})
 880 | 
 881 | 	return nil
 882 | }
 883 | 
 884 | // Handler functions for specific databases
 885 | func handleQueryForDatabase(ctx context.Context, params map[string]interface{}, dbID string) (interface{}, error) {
 886 | 	query, _ := getStringParam(params, "query")
 887 | 	paramList, hasParams := getArrayParam(params, "params")
 888 | 
 889 | 	var queryParams []interface{}
 890 | 	if hasParams {
 891 | 		queryParams = paramList
 892 | 	}
 893 | 
 894 | 	result, err := executeQueryWithParams(ctx, dbID, query, queryParams)
 895 | 	if err != nil {
 896 | 		return createErrorResponse(fmt.Sprintf("Error executing query on %s: %v", dbID, err)), nil
 897 | 	}
 898 | 
 899 | 	return map[string]interface{}{
 900 | 		"content": []map[string]interface{}{
 901 | 			{"type": "text", "text": fmt.Sprintf("Results:\n\n%s", result)},
 902 | 		},
 903 | 	}, nil
 904 | }
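     | // A successful response marshals to MCP-style text content, for example:
     | //   {"content": [{"type": "text", "text": "Results:\n\nid\tname\n..."}]}
     | // Errors take the same shape with "isError": true (see createErrorResponse below).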
 905 | 
 906 | func handleExecuteForDatabase(ctx context.Context, params map[string]interface{}, dbID string) (interface{}, error) {
 907 | 	statement, _ := getStringParam(params, "statement")
 908 | 	paramList, hasParams := getArrayParam(params, "params")
 909 | 
 910 | 	var stmtParams []interface{}
 911 | 	if hasParams {
 912 | 		stmtParams = paramList
 913 | 	}
 914 | 
 915 | 	result, err := executeStatementWithParams(ctx, dbID, statement, stmtParams)
 916 | 	if err != nil {
 917 | 		return createErrorResponse(fmt.Sprintf("Error executing statement on %s: %v", dbID, err)), nil
 918 | 	}
 919 | 
 920 | 	return map[string]interface{}{
 921 | 		"content": []map[string]interface{}{
 922 | 			{"type": "text", "text": result},
 923 | 		},
 924 | 	}, nil
 925 | }
 926 | 
 927 | func handleTransactionForDatabase(ctx context.Context, params map[string]interface{}, dbID string) (interface{}, error) {
 928 | 	action, _ := getStringParam(params, "action")
 929 | 	txID, hasTxID := getStringParam(params, "transactionId")
 930 | 	statement, hasStatement := getStringParam(params, "statement")
 931 | 
 932 | 	// Read the optional readOnly flag; a missing or non-boolean value defaults to false
 933 | 	readOnly := false
 934 | 	if val, ok := params["readOnly"].(bool); ok {
 935 | 		readOnly = val
 936 | 	}
 937 | 
 938 | 	paramList, hasParams := getArrayParam(params, "params")
 939 | 	var stmtParams []interface{}
 940 | 	if hasParams {
 941 | 		stmtParams = paramList
 942 | 	}
 943 | 
 944 | 	switch action {
 945 | 	case "begin":
 946 | 		// Generate transaction ID if not provided
 947 | 		if !hasTxID {
 948 | 			txID = fmt.Sprintf("tx_%s_%d", dbID, time.Now().Unix())
 949 | 		}
 950 | 
 951 | 		// Start transaction
 952 | 		db, err := GetDatabase(dbID)
 953 | 		if err != nil {
 954 | 			return createErrorResponse(fmt.Sprintf("Failed to get database %s: %v", dbID, err)), nil
 955 | 		}
 956 | 
 957 | 		// Set read-only option if specified
 958 | 		var opts *sql.TxOptions
 959 | 		if readOnly {
 960 | 			opts = &sql.TxOptions{ReadOnly: true}
 961 | 		}
 962 | 
 963 | 		tx, err := db.BeginTx(ctx, opts)
 964 | 		if err != nil {
 965 | 			return createErrorResponse(fmt.Sprintf("Failed to begin transaction: %v", err)), nil
 966 | 		}
 967 | 
 968 | 		// Store transaction
 969 | 		if err := storeTransaction(txID, tx); err != nil {
 970 | 			return createErrorResponse(fmt.Sprintf("Failed to store transaction: %v", err)), nil
 971 | 		}
 972 | 
 973 | 		return map[string]interface{}{
 974 | 			"content": []map[string]interface{}{
 975 | 				{"type": "text", "text": "Transaction started"},
 976 | 			},
 977 | 			"metadata": map[string]interface{}{
 978 | 				"transactionId": txID,
 979 | 			},
 980 | 		}, nil
 981 | 
 982 | 	case "commit":
 983 | 		if !hasTxID {
 984 | 			return createErrorResponse("transactionId is required for commit action"), nil
 985 | 		}
 986 | 
 987 | 		tx, err := getTransaction(txID)
 988 | 		if err != nil {
 989 | 			return createErrorResponse(fmt.Sprintf("Failed to get transaction %s: %v", txID, err)), nil
 990 | 		}
 991 | 
 992 | 		if err := tx.Commit(); err != nil {
 993 | 			return createErrorResponse(fmt.Sprintf("Failed to commit transaction: %v", err)), nil
 994 | 		}
 995 | 
 996 | 		// Remove transaction
 997 | 		removeTransaction(txID)
 998 | 
 999 | 		return map[string]interface{}{
1000 | 			"content": []map[string]interface{}{
1001 | 				{"type": "text", "text": "Transaction committed"},
1002 | 			},
1003 | 		}, nil
1004 | 
1005 | 	case "rollback":
1006 | 		if !hasTxID {
1007 | 			return createErrorResponse("transactionId is required for rollback action"), nil
1008 | 		}
1009 | 
1010 | 		tx, err := getTransaction(txID)
1011 | 		if err != nil {
1012 | 			return createErrorResponse(fmt.Sprintf("Failed to get transaction %s: %v", txID, err)), nil
1013 | 		}
1014 | 
1015 | 		if err := tx.Rollback(); err != nil {
1016 | 			return createErrorResponse(fmt.Sprintf("Failed to rollback transaction: %v", err)), nil
1017 | 		}
1018 | 
1019 | 		// Remove transaction
1020 | 		removeTransaction(txID)
1021 | 
1022 | 		return map[string]interface{}{
1023 | 			"content": []map[string]interface{}{
1024 | 				{"type": "text", "text": "Transaction rolled back"},
1025 | 			},
1026 | 		}, nil
1027 | 
1028 | 	case "execute":
1029 | 		if !hasTxID {
1030 | 			return createErrorResponse("transactionId is required for execute action"), nil
1031 | 		}
1032 | 
1033 | 		if !hasStatement {
1034 | 			return createErrorResponse("statement is required for execute action"), nil
1035 | 		}
1036 | 
1037 | 		tx, err := getTransaction(txID)
1038 | 		if err != nil {
1039 | 			return createErrorResponse(fmt.Sprintf("Failed to get transaction %s: %v", txID, err)), nil
1040 | 		}
1041 | 
1042 | 		// Execute the statement under the caller's context
1043 | 		_, err = tx.ExecContext(ctx, statement, stmtParamsToInterfaceSlice(stmtParams)...)
1044 | 		if err != nil {
1045 | 			return createErrorResponse(fmt.Sprintf("Failed to execute statement in transaction: %v", err)), nil
1046 | 		}
1047 | 
1048 | 		return map[string]interface{}{
1049 | 			"content": []map[string]interface{}{
1050 | 				{"type": "text", "text": "Statement executed in transaction"},
1051 | 			},
1052 | 		}, nil
1053 | 
1054 | 	default:
1055 | 		return createErrorResponse(fmt.Sprintf("Unknown transaction action: %s", action)), nil
1056 | 	}
1057 | }
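     | // Illustrative call sequence for this tool (IDs and values are hypothetical):
     | //   {"action": "begin", "readOnly": false}                      -> metadata.transactionId "tx_db1_1700000000"
     | //   {"action": "execute", "transactionId": "tx_db1_1700000000", "statement": "INSERT ...", "params": ["42"]}
     | //   {"action": "commit", "transactionId": "tx_db1_1700000000"}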
1058 | 
1059 | func handlePerformanceForDatabase(ctx context.Context, params map[string]interface{}, dbID string) (interface{}, error) {
1060 | 	action, _ := getStringParam(params, "action")
1061 | 
1062 | 	// Parse the optional result limit (default 10)
1063 | 	limitVal, hasLimit := params["limit"].(float64)
1064 | 	limit := 10
1065 | 	if hasLimit {
1066 | 		limit = int(limitVal)
1067 | 	}
1068 | 
1069 | 	// Return a basic mock response for the performance action
1070 | 	return map[string]interface{}{
1071 | 		"content": []map[string]interface{}{
1072 | 			{
1073 | 				"type": "text",
1074 | 				"text": fmt.Sprintf("Performance analysis for action '%s' on database '%s'\nLimit: %d\n", action, dbID, limit),
1075 | 			},
1076 | 		},
1077 | 	}, nil
1078 | }
1079 | 
1080 | func handleSchemaForDatabase(ctx context.Context, params map[string]interface{}, dbID string) (interface{}, error) {
1081 | 	// Try to get database schema
1082 | 	db, err := GetDatabase(dbID)
1083 | 	if err != nil {
1084 | 		return createErrorResponse(fmt.Sprintf("Failed to get database %s: %v", dbID, err)), nil
1085 | 	}
1086 | 
1087 | 	// Get database type for more accurate schema reporting
1088 | 	var dbType string
1089 | 	switch db.DriverName() {
1090 | 	case "mysql":
1091 | 		dbType = "mysql"
1092 | 	case "postgres":
1093 | 		dbType = "postgres"
1094 | 	default:
1095 | 		dbType = "unknown"
1096 | 	}
1097 | 
1098 | 	// Get schema information
1099 | 	schema := getBasicSchemaInfo(ctx, db, dbID, dbType)
1100 | 
1101 | 	return map[string]interface{}{
1102 | 		"content": []map[string]interface{}{
1103 | 			{
1104 | 				"type": "text",
1105 | 				"text": fmt.Sprintf("Database Schema for %s:\n\n%v", dbID, schema),
1106 | 			},
1107 | 		},
1108 | 	}, nil
1109 | }
1110 | 
1111 | // getBasicSchemaInfo returns a minimal schema description (the table list) for the given database
1112 | func getBasicSchemaInfo(ctx context.Context, db db.Database, dbID, dbType string) map[string]interface{} {
1113 | 	result := map[string]interface{}{
1114 | 		"database": dbID,
1115 | 		"dbType":   dbType,
1116 | 		"tables":   []map[string]string{},
1117 | 	}
1118 | 
1119 | 	// Try to get table list - a simple query that should work on most databases
1120 | 	var query string
1121 | 	switch dbType {
1122 | 	case "mysql":
1123 | 		query = "SHOW TABLES"
1124 | 	case "postgres":
1125 | 		query = "SELECT tablename AS TABLE_NAME FROM pg_catalog.pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')"
1126 | 	default:
1127 | 		// Fall back to a SQLite-style catalog query for unknown driver types
1128 | 		query = "SELECT name FROM sqlite_master WHERE type='table'"
1129 | 	}
1130 | 
1131 | 	rows, err := db.Query(ctx, query)
1132 | 	if err != nil {
1133 | 		// Return empty schema if query fails
1134 | 		return result
1135 | 	}
1136 | 	defer func() {
1137 | 		if cerr := rows.Close(); cerr != nil {
1138 | 			logger.Warn("Error closing rows: %v", cerr)
1139 | 		}
1140 | 	}()
1141 | 
1142 | 	tables := []map[string]string{}
1143 | 	for rows.Next() {
1144 | 		var tableName string
1145 | 		if err := rows.Scan(&tableName); err != nil {
1146 | 			continue
1147 | 		}
1148 | 		tables = append(tables, map[string]string{"TABLE_NAME": tableName})
1149 | 	}
1150 | 
1151 | 	result["tables"] = tables
1152 | 	return result
1153 | }
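     | // Example return value (illustrative):
     | //   {"database": "db1", "dbType": "mysql",
     | //    "tables": [{"TABLE_NAME": "users"}, {"TABLE_NAME": "orders"}]}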
1154 | 
1155 | // stmtParamsToInterfaceSlice returns a defensive copy of the parameter slice
1156 | func stmtParamsToInterfaceSlice(params []interface{}) []interface{} {
1157 | 	result := make([]interface{}, len(params))
1158 | 	copy(result, params)
1159 | 	return result
1160 | }
1161 | 
1162 | func createErrorResponse(message string) map[string]interface{} {
1163 | 	return map[string]interface{}{
1164 | 		"content": []map[string]interface{}{
1165 | 			{"type": "text", "text": fmt.Sprintf("Error: %s", message)},
1166 | 		},
1167 | 		"isError": true,
1168 | 	}
1169 | }
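     | // Example payload (message text is illustrative):
     | //   {"content": [{"type": "text", "text": "Error: transaction with ID tx_1 not found"}], "isError": true}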
1170 | 
1171 | // executeQueryWithParams executes a query with the given parameters
1172 | func executeQueryWithParams(ctx context.Context, dbID, query string, params []interface{}) (string, error) {
1173 | 	db, err := GetDatabase(dbID)
1174 | 	if err != nil {
1175 | 		return "", fmt.Errorf("failed to get database %s: %w", dbID, err)
1176 | 	}
1177 | 
1178 | 	rows, err := db.Query(ctx, query, params...)
1179 | 	if err != nil {
1180 | 		return "", fmt.Errorf("failed to execute query: %w", err)
1181 | 	}
1182 | 	defer func() {
1183 | 		if cerr := rows.Close(); cerr != nil {
1184 | 			logger.Warn("Error closing rows: %v", cerr)
1185 | 		}
1186 | 	}()
1187 | 
1188 | 	// Convert rows to string representation
1189 | 	result, err := formatRows(rows)
1190 | 	if err != nil {
1191 | 		return "", fmt.Errorf("failed to format rows: %w", err)
1192 | 	}
1193 | 
1194 | 	return result, nil
1195 | }
1196 | 
1197 | // executeStatementWithParams executes a statement with the given parameters
1198 | func executeStatementWithParams(ctx context.Context, dbID, statement string, params []interface{}) (string, error) {
1199 | 	db, err := GetDatabase(dbID)
1200 | 	if err != nil {
1201 | 		return "", fmt.Errorf("failed to get database %s: %w", dbID, err)
1202 | 	}
1203 | 
1204 | 	result, err := db.Exec(ctx, statement, params...)
1205 | 	if err != nil {
1206 | 		return "", fmt.Errorf("failed to execute statement: %w", err)
1207 | 	}
1208 | 
1209 | 	// Get affected rows
1210 | 	rowsAffected, err := result.RowsAffected()
1211 | 	if err != nil {
1212 | 		rowsAffected = 0
1213 | 	}
1214 | 
1215 | 	// Get last insert ID (might not be supported by all databases)
1216 | 	lastInsertID, err := result.LastInsertId()
1217 | 	if err != nil {
1218 | 		lastInsertID = 0
1219 | 	}
1220 | 
1221 | 	return fmt.Sprintf("Statement executed successfully.\nRows affected: %d\nLast insert ID: %d", rowsAffected, lastInsertID), nil
1222 | }
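     | // Note: LastInsertId support is driver-dependent; PostgreSQL drivers typically
     | // return an error here, so the ID is reported as 0 (a RETURNING clause is the
     | // usual alternative on Postgres).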
1223 | 
1224 | // storeTransaction stores a transaction with the given ID
1225 | func storeTransaction(id string, tx *sql.Tx) error {
1226 | 	// Check if transaction already exists
1227 | 	_, exists := GetTransaction(id)
1228 | 	if exists {
1229 | 		return fmt.Errorf("transaction with ID %s already exists", id)
1230 | 	}
1231 | 
1232 | 	StoreTransaction(id, tx)
1233 | 	return nil
1234 | }
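     | // Note: the existence check and the store are two separate calls, so concurrent
     | // begins with an identical ID could race; generated IDs (tx_<dbID>_<unix>) make
     | // collisions unlikely but not impossible.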
1235 | 
1236 | // getTransaction retrieves a transaction by ID
1237 | func getTransaction(id string) (*sql.Tx, error) {
1238 | 	tx, exists := GetTransaction(id)
1239 | 	if !exists {
1240 | 		return nil, fmt.Errorf("transaction with ID %s not found", id)
1241 | 	}
1242 | 
1243 | 	return tx, nil
1244 | }
1245 | 
1246 | // removeTransaction removes a transaction from storage
1247 | func removeTransaction(id string) {
1248 | 	RemoveTransaction(id)
1249 | }
1250 | 
1251 | // formatRows formats SQL rows as a string table
1252 | func formatRows(rows *sql.Rows) (string, error) {
1253 | 	// Get column names
1254 | 	columns, err := rows.Columns()
1255 | 	if err != nil {
1256 | 		return "", err
1257 | 	}
1258 | 
1259 | 	// Prepare column value holders
1260 | 	values := make([]interface{}, len(columns))
1261 | 	valuePtrs := make([]interface{}, len(columns))
1262 | 	for i := range columns {
1263 | 		valuePtrs[i] = &values[i]
1264 | 	}
1265 | 
1266 | 	// Build header
1267 | 	var sb strings.Builder
1268 | 	for i, col := range columns {
1269 | 		if i > 0 {
1270 | 			sb.WriteString("\t")
1271 | 		}
1272 | 		sb.WriteString(col)
1273 | 	}
1274 | 	sb.WriteString("\n")
1275 | 
1276 | 	// Add separator
1277 | 	sb.WriteString(strings.Repeat("-", 80))
1278 | 	sb.WriteString("\n")
1279 | 
1280 | 	// Process rows
1281 | 	rowCount := 0
1282 | 	for rows.Next() {
1283 | 		rowCount++
1284 | 		if err := rows.Scan(valuePtrs...); err != nil {
1285 | 			return "", err
1286 | 		}
1287 | 
1288 | 		// Format row values
1289 | 		for i, val := range values {
1290 | 			if i > 0 {
1291 | 				sb.WriteString("\t")
1292 | 			}
1293 | 			sb.WriteString(formatValue(val))
1294 | 		}
1295 | 		sb.WriteString("\n")
1296 | 	}
1297 | 
1298 | 	if err := rows.Err(); err != nil {
1299 | 		return "", err
1300 | 	}
1301 | 
1302 | 	// Add total row count
1303 | 	sb.WriteString(fmt.Sprintf("\nTotal rows: %d", rowCount))
1304 | 	return sb.String(), nil
1305 | }
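     | // Example output (illustrative; the separator line is 80 dashes):
     | //   id	name
     | //   --------------------
     | //   1	alice
     | //   2	bob
     | //
     | //   Total rows: 2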
1306 | 
1307 | // formatValue converts a value to string representation
1308 | func formatValue(val interface{}) string {
1309 | 	if val == nil {
1310 | 		return "NULL"
1311 | 	}
1312 | 
1313 | 	switch v := val.(type) {
1314 | 	case []byte:
1315 | 		return string(v)
1316 | 	case time.Time:
1317 | 		return v.String()
1318 | 	default:
1319 | 		return fmt.Sprintf("%v", v)
1320 | 	}
1321 | }
1322 | 
```
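
A minimal sketch of how the registration entry point above might be wired up, assuming the module path `github.com/FreePeak/db-mcp-server` and a `tools.NewRegistry` constructor (both are assumptions; see `pkg/tools` and `cmd/server/main.go` for the actual wiring):

```go
package main

import (
	"log"

	"github.com/FreePeak/db-mcp-server/pkg/dbtools" // assumed module path
	"github.com/FreePeak/db-mcp-server/pkg/tools"   // assumed module path
)

func main() {
	// Hypothetical wiring: build a registry and register one
	// query/execute/transaction/performance/schema tool set per configured
	// database, plus the shared list_databases tool.
	registry := tools.NewRegistry() // NewRegistry is an assumed constructor
	if err := dbtools.RegisterMCPDatabaseTools(registry); err != nil {
		log.Fatalf("registering MCP database tools: %v", err)
	}
}
```

If no connections are available, `RegisterMCPDatabaseTools` falls back to `registerMCPMockTools`, so clients still see a working (mock) `query_mock` and `list_databases` tool.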