This is page 1 of 2. Use http://codebase.md/winor30/mcp-server-datadog?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .github
│   ├── CODEOWNERS
│   └── workflows
│       ├── ci.yml
│       └── publish.yml
├── .gitignore
├── .husky
│   └── pre-commit
├── .prettierignore
├── .prettierrc
├── Dockerfile
├── eslint.config.js
├── jest.config.ts
├── LICENSE
├── package.json
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── README.md
├── smithery.yaml
├── src
│   ├── index.ts
│   ├── tools
│   │   ├── dashboards
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── downtimes
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── hosts
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── incident
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── logs
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── metrics
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── monitors
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   ├── rum
│   │   │   ├── index.ts
│   │   │   ├── schema.ts
│   │   │   └── tool.ts
│   │   └── traces
│   │       ├── index.ts
│   │       ├── schema.ts
│   │       └── tool.ts
│   └── utils
│       ├── datadog.ts
│       ├── helper.ts
│       ├── tool.ts
│       └── types.ts
├── tests
│   ├── helpers
│   │   ├── datadog.ts
│   │   ├── mock.ts
│   │   └── msw.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── dashboards.test.ts
│   │   ├── downtimes.test.ts
│   │   ├── hosts.test.ts
│   │   ├── incident.test.ts
│   │   ├── logs.test.ts
│   │   ├── metrics.test.ts
│   │   ├── monitors.test.ts
│   │   ├── rum.test.ts
│   │   └── traces.test.ts
│   └── utils
│       ├── datadog.test.ts
│       └── tool.test.ts
├── tsconfig.json
├── tsup.config.ts
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
```
1 | pnpm-lock.yaml
2 |
```
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
```
1 | {
2 | "singleQuote": true,
3 | "semi": false,
4 | "useTabs": false,
5 | "trailingComma": "all",
6 | "printWidth": 80
7 | }
8 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | # Logs
2 | *.log
3 | npm-debug.log*
4 | yarn-debug.log*
5 | yarn-error.log*
6 | lerna-debug.log*
7 | .pnpm-debug.log*
8 |
9 | # Diagnostic reports (https://nodejs.org/api/report.html)
10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
11 |
12 | # Runtime data
13 | pids
14 | *.pid
15 | *.seed
16 | *.pid.lock
17 |
18 | # Directory for instrumented libs generated by jscoverage/JSCover
19 | lib-cov
20 |
21 | # Coverage directory used by tools like istanbul
22 | coverage
23 | *.lcov
24 |
25 | # nyc test coverage
26 | .nyc_output
27 |
28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
29 | .grunt
30 |
31 | # Bower dependency directory (https://bower.io/)
32 | bower_components
33 |
34 | # node-waf configuration
35 | .lock-wscript
36 |
37 | # Compiled binary addons (https://nodejs.org/api/addons.html)
38 | build/
39 |
40 | # Dependency directories
41 | node_modules/
42 | jspm_packages/
43 |
44 | # Snowpack dependency directory (https://snowpack.dev/)
45 | web_modules/
46 |
47 | # TypeScript cache
48 | *.tsbuildinfo
49 |
50 | # Optional npm cache directory
51 | .npm
52 |
53 | # Optional eslint cache
54 | .eslintcache
55 |
56 | # Optional stylelint cache
57 | .stylelintcache
58 |
59 | # Microbundle cache
60 | .rpt2_cache/
61 | .rts2_cache_cjs/
62 | .rts2_cache_es/
63 | .rts2_cache_umd/
64 |
65 | # Optional REPL history
66 | .node_repl_history
67 |
68 | # Output of 'npm pack'
69 | *.tgz
70 |
71 | # Yarn Integrity file
72 | .yarn-integrity
73 |
74 | # dotenv environment variable files
75 | .env
76 | .env.development.local
77 | .env.test.local
78 | .env.production.local
79 | .env.local
80 |
81 | # parcel-bundler cache (https://parceljs.org/)
82 | .cache
83 | .parcel-cache
84 |
85 | # Next.js build output
86 | .next
87 | out
88 |
89 | # Nuxt.js build / generate output
90 | .nuxt
91 | dist
92 |
93 | # Gatsby files
94 | .cache/
95 | # Comment in the public line in if your project uses Gatsby and not Next.js
96 | # https://nextjs.org/blog/next-9-1#public-directory-support
97 | # public
98 |
99 | # vuepress build output
100 | .vuepress/dist
101 |
102 | # vuepress v2.x temp and cache directory
103 | .temp
104 | .cache
105 |
106 | # Docusaurus cache and generated files
107 | .docusaurus
108 |
109 | # Serverless directories
110 | .serverless/
111 |
112 | # FuseBox cache
113 | .fusebox/
114 |
115 | # DynamoDB Local files
116 | .dynamodb/
117 |
118 | # TernJS port file
119 | .tern-port
120 |
121 | # Stores VSCode versions used for testing VSCode extensions
122 | .vscode-test
123 |
124 | # yarn v2
125 | .yarn/cache
126 | .yarn/unplugged
127 | .yarn/build-state.yml
128 | .yarn/install-state.gz
129 | .pnp.*
130 |
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Datadog MCP Server
2 |
3 | > **DISCLAIMER**: This is a community-maintained project and is not officially affiliated with, endorsed by, or supported by Datadog, Inc. This MCP server utilizes the Datadog API but is developed independently as part of the [Model Context Protocol](https://github.com/modelcontextprotocol/servers) ecosystem.
4 |
5 | [](https://codecov.io/gh/winor30/mcp-server-datadog)[](https://smithery.ai/server/@winor30/mcp-server-datadog)
6 |
7 | MCP server for the Datadog API, enabling incident management and more.
8 |
9 | <a href="https://glama.ai/mcp/servers/bu8gtzkwfr">
10 | <img width="380" height="200" src="https://glama.ai/mcp/servers/bu8gtzkwfr/badge" alt="mcp-server-datadog MCP server" />
11 | </a>
12 |
13 | ## Features
14 |
15 | - **Observability Tools**: Provides a mechanism to leverage key Datadog monitoring features, such as incidents, monitors, logs, dashboards, and metrics, through the MCP server.
16 | - **Extensible Design**: Designed to easily integrate with additional Datadog APIs, allowing for seamless future feature expansion.
17 |
18 | ## Tools
19 |
20 | 1. `list_incidents`
21 |
22 | - Retrieve a list of incidents from Datadog.
23 | - **Inputs**:
24 | - `filter` (optional string): Filter parameters for incidents (e.g., status, priority).
25 | - `pagination` (optional object): Pagination details like page size/offset.
26 | - **Returns**: Array of Datadog incidents and associated metadata.
27 |
28 | 2. `get_incident`
29 |
30 | - Retrieve detailed information about a specific Datadog incident.
31 | - **Inputs**:
32 | - `incident_id` (string): Incident ID to fetch details for.
33 | - **Returns**: Detailed incident information (title, status, timestamps, etc.).
34 |
35 | 3. `get_monitors`
36 |
37 | - Fetch the status of Datadog monitors.
38 | - **Inputs**:
39 | - `groupStates` (optional array): States to filter (e.g., alert, warn, no data, ok).
40 | - `name` (optional string): Filter by name.
41 | - `tags` (optional array): Filter by tags.
42 | - **Returns**: Monitors data and a summary of their statuses.
43 |
44 | 4. `get_logs`
45 |
46 | - Search and retrieve logs from Datadog.
47 | - **Inputs**:
48 | - `query` (string): Datadog logs query string.
49 | - `from` (number): Start time in epoch seconds.
50 | - `to` (number): End time in epoch seconds.
51 | - `limit` (optional number): Maximum number of logs to return (defaults to 100).
52 | - **Returns**: Array of matching logs.
53 |
54 | 5. `list_dashboards`
55 |
56 | - Get a list of dashboards from Datadog.
57 | - **Inputs**:
58 | - `name` (optional string): Filter dashboards by name.
59 | - `tags` (optional array): Filter dashboards by tags.
60 | - **Returns**: Array of dashboards with URL references.
61 |
62 | 6. `get_dashboard`
63 |
64 | - Retrieve a specific dashboard from Datadog.
65 | - **Inputs**:
66 | - `dashboard_id` (string): ID of the dashboard to fetch.
67 | - **Returns**: Dashboard details including title, widgets, etc.
68 |
69 | 7. `query_metrics`
70 |
71 | - Retrieve metrics data from Datadog.
72 | - **Inputs**:
73 | - `query` (string): Metrics query string.
74 | - `from` (number): Start time in epoch seconds.
75 | - `to` (number): End time in epoch seconds.
76 | - **Returns**: Metrics data for the queried timeframe.
77 |
78 | 8. `list_traces`
79 |
80 | - Retrieve a list of APM traces from Datadog.
81 | - **Inputs**:
82 | - `query` (string): Datadog APM trace query string.
83 | - `from` (number): Start time in epoch seconds.
84 | - `to` (number): End time in epoch seconds.
85 | - `limit` (optional number): Maximum number of traces to return (defaults to 100).
86 | - `sort` (optional string): Sort order for traces (defaults to '-timestamp').
87 | - `service` (optional string): Filter by service name.
88 | - `operation` (optional string): Filter by operation name.
89 | - **Returns**: Array of matching traces from Datadog APM.
90 |
91 | 9. `list_hosts`
92 |
93 | - Get a list of hosts from Datadog.
94 | - **Inputs**:
95 | - `filter` (optional string): Filter string for search results.
96 | - `sort_field` (optional string): Field to sort hosts by.
97 | - `sort_dir` (optional string): Sort direction (asc/desc).
98 | - `start` (optional number): Starting offset for pagination.
99 | - `count` (optional number): Max number of hosts to return (max: 1000).
100 | - `from` (optional number): Search hosts from this UNIX timestamp.
101 | - `include_muted_hosts_data` (optional boolean): Include muted hosts status and expiry.
102 | - `include_hosts_metadata` (optional boolean): Include host metadata (version, platform, etc).
103 | - **Returns**: Array of hosts with details including name, ID, aliases, apps, mute status, and more.
104 |
105 | 10. `get_active_hosts_count`
106 |
107 | - Get the total number of active hosts in Datadog.
108 | - **Inputs**:
109 | - `from` (optional number): Number of seconds from which you want to get total number of active hosts (defaults to 2h).
110 | - **Returns**: Count of total active and up hosts.
111 |
112 | 11. `mute_host`
113 |
114 | - Mute a host in Datadog.
115 | - **Inputs**:
116 | - `hostname` (string): The name of the host to mute.
117 | - `message` (optional string): Message to associate with the muting of this host.
118 | - `end` (optional number): POSIX timestamp for when the mute should end.
119 | - `override` (optional boolean): If true and the host is already muted, replaces existing end time.
120 | - **Returns**: Success status and confirmation message.
121 |
122 | 12. `unmute_host`
123 |
124 | - Unmute a host in Datadog.
125 | - **Inputs**:
126 | - `hostname` (string): The name of the host to unmute.
127 | - **Returns**: Success status and confirmation message.
128 |
129 | 13. `list_downtimes`
130 |
131 | - List scheduled downtimes from Datadog.
132 | - **Inputs**:
133 | - `currentOnly` (optional boolean): Return only currently active downtimes when true.
134 | - `monitorId` (optional number): Filter by monitor ID.
135 | - **Returns**: Array of scheduled downtimes with details including scope, monitor information, and schedule.
136 |
137 | 14. `schedule_downtime`
138 |
139 | - Schedule a downtime in Datadog.
140 | - **Inputs**:
141 | - `scope` (string): Scope to apply downtime to (e.g. 'host:my-host').
142 | - `start` (optional number): UNIX timestamp for the start of the downtime.
143 | - `end` (optional number): UNIX timestamp for the end of the downtime.
144 | - `message` (optional string): A message to include with the downtime.
145 | - `timezone` (optional string): The timezone for the downtime (e.g. 'UTC', 'America/New_York').
146 | - `monitorId` (optional number): The ID of the monitor to mute.
147 | - `monitorTags` (optional array): A list of monitor tags for filtering.
148 | - `recurrence` (optional object): Recurrence settings for the downtime.
149 | - `type` (string): Recurrence type ('days', 'weeks', 'months', 'years').
150 | - `period` (number): How often to repeat (must be >= 1).
151 | - `weekDays` (optional array): Days of the week for weekly recurrence.
152 | - `until` (optional number): UNIX timestamp for when the recurrence ends.
153 | - **Returns**: Scheduled downtime details including ID and active status.
154 |
155 | 15. `cancel_downtime`
156 |
157 | - Cancel a scheduled downtime in Datadog.
158 | - **Inputs**:
159 | - `downtimeId` (number): The ID of the downtime to cancel.
160 | - **Returns**: Confirmation of downtime cancellation.
161 |
162 | 16. `get_rum_applications`
163 |
164 | - Get all RUM applications in the organization.
165 | - **Inputs**: None.
166 | - **Returns**: List of RUM applications.
167 |
168 | 17. `get_rum_events`
169 |
170 | - Search and retrieve RUM events from Datadog.
171 | - **Inputs**:
172 | - `query` (string): Datadog RUM query string.
173 | - `from` (number): Start time in epoch seconds.
174 | - `to` (number): End time in epoch seconds.
175 | - `limit` (optional number): Maximum number of events to return (default: 100).
176 | - **Returns**: Array of RUM events.
177 |
178 | 18. `get_rum_grouped_event_count`
179 |
180 | - Search, group and count RUM events by a specified dimension.
181 | - **Inputs**:
182 | - `query` (optional string): Additional query filter for RUM search (default: "\*").
183 | - `from` (number): Start time in epoch seconds.
184 | - `to` (number): End time in epoch seconds.
185 | - `groupBy` (optional string): Dimension to group results by (default: "application.name").
186 | - **Returns**: Grouped event counts.
187 |
188 | 19. `get_rum_page_performance`
189 |
190 | - Get page (view) performance metrics from RUM data.
191 | - **Inputs**:
192 | - `query` (optional string): Additional query filter for RUM search (default: "\*").
193 | - `from` (number): Start time in epoch seconds.
194 | - `to` (number): End time in epoch seconds.
195 | - `metricNames` (array of strings): Array of metric names to retrieve (e.g., 'view.load_time', 'view.first_contentful_paint').
196 | - **Returns**: Performance metrics including average, min, max, and count for each metric.
197 |
198 | 20. `get_rum_page_waterfall`
199 |
200 | - Retrieve RUM page (view) waterfall data filtered by application name and session ID.
201 | - **Inputs**:
202 | - `applicationName` (string): Application name to filter events.
203 | - `sessionId` (string): Session ID to filter events.
204 | - **Returns**: Waterfall data for the specified application and session.
205 |
206 | ## Setup
207 |
208 | ### Datadog Credentials
209 |
210 | You need valid Datadog API credentials to use this MCP server:
211 |
212 | - `DATADOG_API_KEY`: Your Datadog API key
213 | - `DATADOG_APP_KEY`: Your Datadog Application key
214 | - `DATADOG_SITE` (optional): The Datadog site (e.g. `datadoghq.eu`)
215 | - `DATADOG_SUBDOMAIN` (optional): The Datadog subdomain (e.g. `<your-subdomain>.datadoghq.com`)
216 |
217 | Export them in your environment before running the server:
218 |
219 | ```bash
220 | export DATADOG_API_KEY="your_api_key"
221 | export DATADOG_APP_KEY="your_app_key"
222 | export DATADOG_SITE="your_datadog_site" # Optional
223 | export DATADOG_SUBDOMAIN="your_datadog_subdomain" # Optional
224 | ```
225 |
226 | ## Installation
227 |
228 | ### Installing via Smithery
229 |
230 | To install Datadog MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@winor30/mcp-server-datadog):
231 |
232 | ```bash
233 | npx -y @smithery/cli install @winor30/mcp-server-datadog --client claude
234 | ```
235 |
236 | ### Manual Installation
237 |
238 | ```bash
239 | pnpm install
240 | pnpm build
241 | pnpm watch # for development with auto-rebuild
242 | ```
243 |
244 | ## Usage with Claude Desktop
245 |
246 | To use this with Claude Desktop, add an entry for this server to your `claude_desktop_config.json`. The first snippet below shows the general MCP server configuration format (using another MCP server as an example), and the second shows the entry for this Datadog server:
247 |
248 | On macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
249 | On Windows: `%APPDATA%/Claude/claude_desktop_config.json`
250 |
251 | ```json
252 | {
253 | "mcpServers": {
254 | "github": {
255 | "command": "npx",
256 | "args": ["-y", "@modelcontextprotocol/server-github"],
257 | "env": {
258 | "GITHUB_PERSONAL_ACCESS_TOKEN": "<YOUR_TOKEN>"
259 | }
260 | }
261 | }
262 | }
263 | ```
264 |
265 | ```json
266 | {
267 | "mcpServers": {
268 | "datadog": {
269 | "command": "/path/to/mcp-server-datadog/build/index.js",
270 | "env": {
271 | "DATADOG_API_KEY": "<YOUR_API_KEY>",
272 | "DATADOG_APP_KEY": "<YOUR_APP_KEY>",
273 | "DATADOG_SITE": "<YOUR_SITE>", // Optional
274 | "DATADOG_SUBDOMAIN": "<YOUR_SUBDOMAIN>" // Optional
275 | }
276 | }
277 | }
278 | }
279 | ```
280 |
281 | Or specify via `npx`:
282 |
283 | ```json
284 | {
285 | "mcpServers": {
286 | "mcp-server-datadog": {
287 | "command": "npx",
288 | "args": ["-y", "@winor30/mcp-server-datadog"],
289 | "env": {
290 | "DATADOG_API_KEY": "<YOUR_API_KEY>",
291 | "DATADOG_APP_KEY": "<YOUR_APP_KEY>",
292 | "DATADOG_SITE": "<YOUR_SITE>", // Optional
293 | "DATADOG_SUBDOMAIN": "<YOUR_SUBDOMAIN>" // Optional
294 | }
295 | }
296 | }
297 | }
298 | ```
299 |
300 | ## Debugging
301 |
302 | Because MCP servers communicate over standard input/output, debugging can sometimes be tricky. We recommend using the [MCP Inspector](https://github.com/modelcontextprotocol/inspector). You can run the inspector with:
303 |
304 | ```bash
305 | npm run inspector
306 | ```
307 |
308 | The inspector will provide a URL you can open in your browser to see logs and send requests manually.
309 |
310 | ## Contributing
311 |
312 | Contributions are welcome! Feel free to open an issue or a pull request if you have any suggestions, bug reports, or improvements to propose.
313 |
314 | ## License
315 |
316 | This project is licensed under the [Apache License, Version 2.0](./LICENSE).
317 |
```
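As a quick reference for the tool list above, the sketch below shows the shape of a `tools/call` request for `get_logs`. The request shape follows `tests/helpers/mock.ts` in this repository; the query string, time window, and limit are illustrative values only.

```typescript
// Minimal sketch of a tools/call request for the get_logs tool (illustrative values).
const getLogsRequest = {
  method: 'tools/call' as const,
  params: {
    name: 'get_logs',
    arguments: {
      query: 'service:my-service status:error', // Datadog logs query string
      from: Math.floor(Date.now() / 1000) - 3600, // start time, one hour ago (epoch seconds)
      to: Math.floor(Date.now() / 1000), // end time, now (epoch seconds)
      limit: 50, // optional, defaults to 100
    },
  },
}
```

How the request is delivered depends on the MCP client (Claude Desktop, the MCP Inspector, or a custom client); the server parses `arguments` with the matching Zod schema before calling the Datadog API.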
--------------------------------------------------------------------------------
/src/tools/rum/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { RUM_TOOLS, createRumToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/pnpm-workspace.yaml:
--------------------------------------------------------------------------------
```yaml
1 | packages:
2 | - .
3 |
4 | onlyBuiltDependencies:
5 | - esbuild
6 | - msw
7 |
```
--------------------------------------------------------------------------------
/src/tools/logs/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { LOGS_TOOLS, createLogsToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/traces/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { TRACES_TOOLS, createTracesToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/metrics/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { METRICS_TOOLS, createMetricsToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/incident/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { INCIDENT_TOOLS, createIncidentToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/monitors/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { MONITORS_TOOLS, createMonitorsToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/downtimes/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { DOWNTIMES_TOOLS, createDowntimesToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/src/tools/dashboards/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export { DASHBOARDS_TOOLS, createDashboardsToolHandlers } from './tool'
2 |
```
--------------------------------------------------------------------------------
/tsup.config.ts:
--------------------------------------------------------------------------------
```typescript
1 | export default {
2 | entry: ['src/index.ts'],
3 | dts: true,
4 | format: ['esm'],
5 | outDir: 'build',
6 | }
7 |
```
--------------------------------------------------------------------------------
/jest.config.ts:
--------------------------------------------------------------------------------
```typescript
1 | /** @type {import('jest').Config} */
2 | module.exports = {
3 | preset: 'ts-jest',
4 | testEnvironment: 'node',
5 | testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'],
6 | }
7 |
```
--------------------------------------------------------------------------------
/tests/helpers/datadog.ts:
--------------------------------------------------------------------------------
```typescript
1 | // Base URL for Datadog API
2 | export const baseUrl = 'https://api.datadoghq.com/api'
3 |
4 | export interface DatadogToolResponse {
5 | content: {
6 | type: 'text'
7 | text: string
8 | }[]
9 | }
10 |
```
--------------------------------------------------------------------------------
/tests/setup.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { afterEach, vi } from 'vitest'
2 |
3 | process.env.DATADOG_API_KEY = 'test-api-key'
4 | process.env.DATADOG_APP_KEY = 'test-app-key'
5 |
6 | // Reset handlers after each test
7 | afterEach(() => {
8 | // server.resetHandlers()
9 | vi.clearAllMocks()
10 | })
11 |
```
--------------------------------------------------------------------------------
/src/tools/incident/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const ListIncidentsZodSchema = z.object({
4 | pageSize: z.number().min(1).max(100).default(10),
5 | pageOffset: z.number().min(0).default(0),
6 | })
7 |
8 | export const GetIncidentZodSchema = z.object({
9 | incidentId: z.string().nonempty(),
10 | })
11 |
```
--------------------------------------------------------------------------------
/src/tools/dashboards/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const ListDashboardsZodSchema = z.object({
4 | name: z.string().optional().describe('Filter dashboards by name'),
5 | tags: z.array(z.string()).optional().describe('Filter dashboards by tags'),
6 | })
7 |
8 | export const GetDashboardZodSchema = z.object({
9 | dashboardId: z.string(),
10 | })
11 |
```
--------------------------------------------------------------------------------
/src/tools/monitors/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const GetMonitorsZodSchema = z.object({
4 | groupStates: z
5 | .array(z.enum(['alert', 'warn', 'no data', 'ok']))
6 | .optional()
7 | .describe('Filter monitors by their states'),
8 | name: z.string().optional().describe('Filter monitors by name'),
9 | tags: z.array(z.string()).optional().describe('Filter monitors by tags'),
10 | })
11 |
```
--------------------------------------------------------------------------------
/src/utils/types.ts:
--------------------------------------------------------------------------------
```typescript
1 | import z from 'zod'
2 | import {
3 | Result,
4 | CallToolRequestSchema,
5 | Tool,
6 | } from '@modelcontextprotocol/sdk/types.js'
7 |
8 | type ToolHandler = (
9 | request: z.infer<typeof CallToolRequestSchema>,
10 | ) => Promise<Result>
11 |
12 | export type ToolHandlers<T extends string = string> = Record<T, ToolHandler>
13 |
14 | export type ExtendedTool<T extends string = string> = Tool & { name: T }
15 |
```
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { defineConfig } from 'vitest/config'
2 |
3 | export default defineConfig({
4 | test: {
5 | globals: true,
6 | environment: 'node',
7 | setupFiles: ['./tests/setup.ts'],
8 | include: ['./tests/**/*.test.ts'],
9 | coverage: {
10 | provider: 'v8',
11 | reporter: ['text', 'json', 'html'],
12 | include: ['src/**/*.ts'],
13 | exclude: ['node_modules/', 'tests/'],
14 | },
15 | },
16 | })
17 |
```
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
```javascript
1 | import globals from 'globals'
2 | import pluginJs from '@eslint/js'
3 | import tseslint from 'typescript-eslint'
4 |
5 | /** @type {import('eslint').Linter.Config[]} */
6 | export default [
7 | { files: ['**/*.{js,mjs,cjs,ts}'] },
8 | { ignores: ['node_modules/**', 'build/**'] },
9 | { languageOptions: { globals: globals.browser } },
10 | pluginJs.configs.recommended,
11 | ...tseslint.configs.recommended,
12 | ]
13 |
```
--------------------------------------------------------------------------------
/src/tools/hosts/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Central export file for the Datadog Hosts management tools.
3 | * Re-exports the tools and their handlers from the implementation file.
4 | *
5 | * HOSTS_TOOLS: Array of tool schemas defining the available host management operations
6 | * createHostsToolHandlers: Function that creates host management operation handlers
7 | */
8 | export { HOSTS_TOOLS, createHostsToolHandlers } from './tool'
9 |
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | "target": "esnext",
4 | "lib": ["esnext"],
5 | "module": "esnext",
6 | "moduleResolution": "bundler",
7 | "outDir": "./build",
8 | "rootDir": "./src",
9 | "strict": true,
10 | "esModuleInterop": true,
11 | "skipLibCheck": true,
12 | "forceConsistentCasingInFileNames": true,
13 | "resolveJsonModule": true
14 | },
15 | "include": ["src/**/*"],
16 | "exclude": ["node_modules"]
17 | }
18 |
```
--------------------------------------------------------------------------------
/src/tools/metrics/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const QueryMetricsZodSchema = z.object({
4 | from: z
5 | .number()
6 | .describe(
7 | 'Start of the queried time period, seconds since the Unix epoch.',
8 | ),
9 | to: z
10 | .number()
11 | .describe('End of the queried time period, seconds since the Unix epoch.'),
12 | query: z
13 | .string()
14 | .describe('Datadog metrics query string. e.g. "avg:system.cpu.user{*}"'),
15 | })
16 |
17 | export type QueryMetricsArgs = z.infer<typeof QueryMetricsZodSchema>
18 |
```
--------------------------------------------------------------------------------
/tests/helpers/mock.ts:
--------------------------------------------------------------------------------
```typescript
1 | interface MockToolRequest {
2 | method: 'tools/call'
3 | params: {
4 | name: string
5 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
6 | arguments: Record<string, any>
7 | }
8 | }
9 |
10 | export function createMockToolRequest(
11 | toolName: string,
12 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
13 | args: Record<string, any>,
14 | ): MockToolRequest {
15 | return {
16 | method: 'tools/call',
17 | params: {
18 | name: toolName,
19 | arguments: args,
20 | },
21 | }
22 | }
23 |
```
--------------------------------------------------------------------------------
/src/tools/traces/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const ListTracesZodSchema = z.object({
4 | query: z.string().describe('Datadog APM trace query string'),
5 | from: z.number().describe('Start time in epoch seconds'),
6 | to: z.number().describe('End time in epoch seconds'),
7 | limit: z
8 | .number()
9 | .optional()
10 | .default(100)
11 | .describe('Maximum number of traces to return'),
12 | sort: z
13 | .enum(['timestamp', '-timestamp'])
14 | .optional()
15 | .default('-timestamp')
16 | .describe('Sort order for traces'),
17 | service: z.string().optional().describe('Filter by service name'),
18 | operation: z.string().optional().describe('Filter by operation name'),
19 | })
20 |
21 | export type ListTracesArgs = z.infer<typeof ListTracesZodSchema>
22 |
```
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Publish to npm
2 | on:
3 | push:
4 | tags:
5 | - 'v*.*.*'
6 |
7 | jobs:
8 | publish:
9 | runs-on: ubuntu-latest
10 |
11 | permissions:
12 | contents: read
13 | id-token: write
14 |
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v4
18 |
19 | - name: Set up Node
20 | uses: actions/setup-node@v4
21 | with:
22 | node-version: 20
23 | registry-url: 'https://registry.npmjs.org/'
24 |
25 | - uses: pnpm/action-setup@v4
26 |
27 | - name: Install dependencies
28 | run: pnpm install --frozen-lockfile
29 |
30 | - name: Build
31 | run: pnpm run build
32 |
33 | - name: Publish
34 | run: pnpm publish --provenance --access public --no-git-checks
35 | env:
36 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
37 |
```
--------------------------------------------------------------------------------
/src/utils/helper.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Logs a formatted message with a specified severity to stderr.
3 | *
4 | * The MCP server uses stdio transport, so using console.log might interfere with the transport.
5 | * Therefore, logging messages are written to stderr.
6 | *
7 | * @param {'info' | 'error'} severity - The severity level of the log message.
8 | * @param {...any[]} args - Additional arguments to be logged, which will be concatenated into a single string.
9 | */
10 | export function log(
11 | severity: 'info' | 'error',
12 | ...args: any[] // eslint-disable-line @typescript-eslint/no-explicit-any
13 | ) {
14 | const msg = `[${severity.toUpperCase()} ${new Date().toISOString()}] ${args.join(' ')}\n`
15 | process.stderr.write(msg)
16 | }
17 |
18 | export { version as mcpDatadogVersion } from '../../package.json'
19 |
20 | export function unreachable(value: never): never {
21 | throw new Error(`Unreachable code: ${value}`)
22 | }
23 |
```
--------------------------------------------------------------------------------
/tests/helpers/msw.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { RequestHandler } from 'msw'
2 | import { SetupServerApi, setupServer as setupServerNode } from 'msw/node'
3 |
4 | export function setupServer(...handlers: RequestHandler[]) {
5 | const server = setupServerNode(...handlers)
6 | debugServer(server)
7 | return server
8 | }
9 |
10 | function debugServer(server: SetupServerApi) {
11 | // Enable network request debugging
12 | server.listen({
13 | onUnhandledRequest: 'warn',
14 | })
15 |
16 | // Log all requests that pass through MSW
17 | server.events.on('request:start', ({ request }) => {
18 | console.log(`[MSW] Request started: ${request.method} ${request.url}`)
19 | })
20 |
21 | server.events.on('request:match', ({ request }) => {
22 | console.log(`[MSW] Request matched: ${request.method} ${request.url}`)
23 | })
24 |
25 | server.events.on('request:unhandled', ({ request }) => {
26 | console.log(`[MSW] Request not handled: ${request.method} ${request.url}`)
27 | })
28 | }
29 |
```
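Taken together, the three helpers above (`baseUrl`, `createMockToolRequest`, and `setupServer`) support tests that exercise a tool handler against a mocked Datadog API. The actual test files are on page 2; the sketch below only illustrates how the pieces might fit, and the mocked `/v1/query` path and response body are assumptions rather than copies from the tests.

```typescript
import { http, HttpResponse } from 'msw'
import { v1 } from '@datadog/datadog-api-client'
import { setupServer } from './msw'
import { createMockToolRequest } from './mock'
import { baseUrl } from './datadog'
import { createDatadogConfig } from '../../src/utils/datadog'
import { createMetricsToolHandlers } from '../../src/tools/metrics/tool'

// Intercept the metrics query endpoint (assumed path) with a canned response
const server = setupServer(
  http.get(`${baseUrl}/v1/query`, () =>
    HttpResponse.json({ status: 'ok', series: [] }),
  ),
)

// Build handlers against a real API client, which now talks to the mock server
const config = createDatadogConfig({
  apiKeyAuth: 'test-api-key',
  appKeyAuth: 'test-app-key',
})
const handlers = createMetricsToolHandlers(new v1.MetricsApi(config))

// Invoke the handler directly with a mocked tools/call request
// (the real tests may cast this to the SDK's request type)
const request = createMockToolRequest('query_metrics', {
  query: 'avg:system.cpu.user{*}',
  from: 1700000000,
  to: 1700003600,
})
const result = await handlers.query_metrics(request)
console.log(result.content)

server.close()
```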
--------------------------------------------------------------------------------
/src/tools/downtimes/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const ListDowntimesZodSchema = z.object({
4 | currentOnly: z.boolean().optional(),
5 | })
6 |
7 | export const ScheduleDowntimeZodSchema = z.object({
8 | scope: z.string().nonempty(), // example: 'host:my-host'
9 | start: z.number().optional(), // UNIX timestamp
10 | end: z.number().optional(), // UNIX timestamp
11 | message: z.string().optional(),
12 | timezone: z.string().optional(), // example: 'UTC', 'America/New_York'
13 | monitorId: z.number().optional(),
14 | monitorTags: z.array(z.string()).optional(),
15 | recurrence: z
16 | .object({
17 | type: z.enum(['days', 'weeks', 'months', 'years']),
18 | period: z.number().min(1),
19 | weekDays: z
20 | .array(z.enum(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']))
21 | .optional(),
22 | until: z.number().optional(), // UNIX timestamp
23 | })
24 | .optional(),
25 | })
26 |
27 | export const CancelDowntimeZodSchema = z.object({
28 | downtimeId: z.number(),
29 | })
30 |
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
2 | FROM node:22.12-alpine AS builder
3 |
4 | # Install pnpm globally
5 | RUN npm install -g pnpm@10
6 |
7 | WORKDIR /app
8 |
9 | # Copy package files and install dependencies
10 | COPY package.json pnpm-lock.yaml ./
11 | RUN pnpm install --frozen-lockfile --ignore-scripts
12 |
13 | # Copy the rest of the files
14 | COPY . .
15 |
16 | # Build the project
17 | RUN pnpm build
18 |
19 | FROM node:22.12-alpine AS installer
20 |
21 | # Install pnpm globally
22 | RUN npm install -g pnpm@10
23 |
24 | WORKDIR /app
25 |
26 | # Copy package files and install only production dependencies
27 | COPY package.json pnpm-lock.yaml ./
28 | RUN pnpm install --frozen-lockfile --ignore-scripts --prod
29 |
30 | FROM node:22.12-alpine AS release
31 |
32 | WORKDIR /app
33 |
34 | COPY --from=builder /app/build /app/build
35 | COPY --from=installer /app/node_modules /app/node_modules
36 |
37 | # No port is exposed: the MCP server communicates over stdio
38 |
39 | CMD ["node", "build/index.js"]
40 |
```
--------------------------------------------------------------------------------
/src/tools/metrics/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { QueryMetricsZodSchema } from './schema'
5 |
6 | type MetricsToolName = 'query_metrics'
7 | type MetricsTool = ExtendedTool<MetricsToolName>
8 |
9 | export const METRICS_TOOLS: MetricsTool[] = [
10 | createToolSchema(
11 | QueryMetricsZodSchema,
12 | 'query_metrics',
13 | 'Query timeseries points of metrics from Datadog',
14 | ),
15 | ] as const
16 |
17 | type MetricsToolHandlers = ToolHandlers<MetricsToolName>
18 |
19 | export const createMetricsToolHandlers = (
20 | apiInstance: v1.MetricsApi,
21 | ): MetricsToolHandlers => {
22 | return {
23 | query_metrics: async (request) => {
24 | const { from, to, query } = QueryMetricsZodSchema.parse(
25 | request.params.arguments,
26 | )
27 |
28 | const response = await apiInstance.queryMetrics({
29 | from,
30 | to,
31 | query,
32 | })
33 |
34 | return {
35 | content: [
36 | {
37 | type: 'text',
38 | text: `Queried metrics data: ${JSON.stringify({ response })}`,
39 | },
40 | ],
41 | }
42 | },
43 | }
44 | }
45 |
```
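The metrics module above is the smallest example of the pattern every tool family in `src/tools/` follows: a Zod schema, `createToolSchema` to derive the MCP tool definition, and a factory that returns a typed handler map. To illustrate the "Extensible Design" point from the README, here is a hedged sketch of a new, hypothetical `list_events` module (not part of this repository); the `v1.EventsApi.listEvents` call is based on the public Datadog client and should be verified before use.

```typescript
// Hypothetical src/tools/events/tool.ts — illustration only, not part of this repository.
import { z } from 'zod'
import { v1 } from '@datadog/datadog-api-client'
import { ExtendedTool, ToolHandlers } from '../../utils/types'
import { createToolSchema } from '../../utils/tool'

const ListEventsZodSchema = z.object({
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
})

type EventsToolName = 'list_events'

export const EVENTS_TOOLS: ExtendedTool<EventsToolName>[] = [
  createToolSchema(ListEventsZodSchema, 'list_events', 'Get events from Datadog'),
] as const

export const createEventsToolHandlers = (
  apiInstance: v1.EventsApi,
): ToolHandlers<EventsToolName> => ({
  list_events: async (request) => {
    const { from, to } = ListEventsZodSchema.parse(request.params.arguments)
    const response = await apiInstance.listEvents({ start: from, end: to })
    return {
      content: [{ type: 'text', text: `Events: ${JSON.stringify(response)}` }],
    }
  },
})
```

The module would then re-export `EVENTS_TOOLS` and `createEventsToolHandlers` from an `index.ts`, mirroring the other tool directories, and be registered in `src/index.ts`.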
--------------------------------------------------------------------------------
/src/utils/datadog.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { client } from '@datadog/datadog-api-client'
2 |
3 | interface CreateDatadogConfigParams {
4 | apiKeyAuth: string
5 | appKeyAuth: string
6 | site?: string
7 | subdomain?: string
8 | }
9 |
10 | export function createDatadogConfig(
11 | config: CreateDatadogConfigParams,
12 | ): client.Configuration {
13 | if (!config.apiKeyAuth || !config.appKeyAuth) {
14 | throw new Error('Datadog API key and APP key are required')
15 | }
16 | const datadogConfig = client.createConfiguration({
17 | authMethods: {
18 | apiKeyAuth: config.apiKeyAuth,
19 | appKeyAuth: config.appKeyAuth,
20 | },
21 | })
22 |
23 | if (config.site != null) {
24 | datadogConfig.setServerVariables({
25 | site: config.site,
26 | })
27 | }
28 |
29 | if (config.subdomain != null) {
30 | datadogConfig.setServerVariables({
31 | subdomain: config.subdomain,
32 | })
33 | }
34 |
35 | datadogConfig.unstableOperations = {
36 | 'v2.listIncidents': true,
37 | 'v2.getIncident': true,
38 | }
39 |
40 | return datadogConfig
41 | }
42 |
43 | export function getDatadogSite(ddConfig: client.Configuration): string {
44 | const config = ddConfig.servers[0]?.getConfiguration()
45 | if (config == null) {
46 | throw new Error('Datadog site is not set')
47 | }
48 | return config.site
49 | }
50 |
```
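`createDatadogConfig` is consumed by `src/index.ts` (not shown on this page), which builds one configuration from the environment variables documented in the README and hands API instances to the tool handler factories. A hedged sketch of that wiring, with only the metrics tools shown:

```typescript
import { v1 } from '@datadog/datadog-api-client'
import { createDatadogConfig, getDatadogSite } from './utils/datadog'
import { createMetricsToolHandlers } from './tools/metrics'
import { log } from './utils/helper'

// Shared client configuration from the env vars documented in the README
// (DATADOG_SITE and DATADOG_SUBDOMAIN are optional).
const datadogConfig = createDatadogConfig({
  apiKeyAuth: process.env.DATADOG_API_KEY ?? '',
  appKeyAuth: process.env.DATADOG_APP_KEY ?? '',
  site: process.env.DATADOG_SITE,
  subdomain: process.env.DATADOG_SUBDOMAIN,
})

log('info', `Datadog site: ${getDatadogSite(datadogConfig)}`)

// Each tool family gets its own API instance created from the same configuration.
const metricsHandlers = createMetricsToolHandlers(
  new v1.MetricsApi(datadogConfig),
)
```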
--------------------------------------------------------------------------------
/src/tools/logs/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | export const GetLogsZodSchema = z.object({
4 | query: z.string().default('').describe('Datadog logs query string'),
5 | from: z.number().describe('Start time in epoch seconds'),
6 | to: z.number().describe('End time in epoch seconds'),
7 | limit: z
8 | .number()
9 | .optional()
10 | .default(100)
11 | .describe('Maximum number of logs to return. Default is 100.'),
12 | })
13 |
14 | /**
15 | * Schema for retrieving all unique service names from logs.
16 | * Defines parameters for querying logs within a time window.
17 | *
18 | * @param query - Optional. Additional query filter for log search. Defaults to "*" (all logs)
19 | * @param from - Required. Start time in epoch seconds
20 | * @param to - Required. End time in epoch seconds
21 | * @param limit - Optional. Maximum number of logs to search through. Default is 1000.
22 | */
23 | export const GetAllServicesZodSchema = z.object({
24 | query: z
25 | .string()
26 | .default('*')
27 | .describe('Optional query filter for log search'),
28 | from: z.number().describe('Start time in epoch seconds'),
29 | to: z.number().describe('End time in epoch seconds'),
30 | limit: z
31 | .number()
32 | .optional()
33 | .default(1000)
34 | .describe('Maximum number of logs to search through. Default is 1000.'),
35 | })
36 |
```
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
```yaml
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | required:
9 | - datadogApiKey
10 | - datadogAppKey
11 | properties:
12 | datadogApiKey:
13 | type: string
14 | description: Your Datadog API key
15 | datadogAppKey:
16 | type: string
17 | description: Your Datadog Application key
18 | datadogSite:
19 | type: string
20 | default: ''
21 | description: Optional Datadog site (e.g. datadoghq.eu)
22 | datadogSubdomain:
23 | type: string
24 | default: ''
25 | description: Optional Datadog subdomain (e.g. <your-subdomain>.datadoghq.com)
26 | commandFunction:
27 | # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
28 | |-
29 | (config) => ({
30 | command: 'node',
31 | args: ['build/index.js'],
32 | env: Object.assign({}, process.env, {
33 | DATADOG_API_KEY: config.datadogApiKey,
34 | DATADOG_APP_KEY: config.datadogAppKey,
35 | ...(config.datadogSite && { DATADOG_SITE: config.datadogSite }),
36 | ...(config.datadogSubdomain && { DATADOG_SUBDOMAIN: config.datadogSubdomain })
37 | })
38 | })
39 | exampleConfig:
40 | datadogApiKey: your_datadog_api_key_here
41 | datadogAppKey: your_datadog_app_key_here
42 | datadogSite: datadoghq.com
43 | datadogSubdomain: your-subdomain
44 |
```
--------------------------------------------------------------------------------
/tests/utils/tool.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest'
2 | import { Tool } from '@modelcontextprotocol/sdk/types.js'
3 | import { createToolSchema } from '../../src/utils/tool'
4 | import { z } from 'zod'
5 |
6 | describe('createToolSchema', () => {
7 | it('should generate tool schema with correct inputSchema when definitions exist', () => {
8 | // Create a dummy schema with a matching definition for the tool name
9 | const dummySchema = z.object({
10 | foo: z.string().describe('foo description'),
11 | bar: z.number().describe('bar description').optional(),
12 | baz: z.boolean().describe('baz description').default(false),
13 | qux: z.number().describe('qux description').min(10).max(20).default(15),
14 | })
15 |
16 | // Call createToolSchema with the dummy schema, tool name, and description
17 | const gotTool = createToolSchema(
18 | dummySchema,
19 | 'test',
20 | 'dummy test description',
21 | )
22 |
23 | // Expected inputSchema based on the dummy schema
24 | const expectedInputSchema: Tool = {
25 | name: 'test',
26 | description: 'dummy test description',
27 | inputSchema: {
28 | type: 'object',
29 | properties: {
30 | foo: {
31 | type: 'string',
32 | description: 'foo description',
33 | },
34 | bar: {
35 | type: 'number',
36 | description: 'bar description',
37 | },
38 | baz: {
39 | type: 'boolean',
40 | description: 'baz description',
41 | default: false,
42 | },
43 | qux: {
44 | type: 'number',
45 | description: 'qux description',
46 | default: 15,
47 | minimum: 10,
48 | maximum: 20,
49 | },
50 | },
51 | required: ['foo'],
52 | },
53 | }
54 |
55 | // Verify the returned tool object matches expected structure
56 | expect(gotTool).toEqual(expectedInputSchema)
57 | })
58 | })
59 |
```
--------------------------------------------------------------------------------
/src/utils/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Tool } from '@modelcontextprotocol/sdk/types.js'
2 | import { ZodSchema } from 'zod'
3 | import zodToJsonSchema from 'zod-to-json-schema'
4 |
5 | type JsonSchema = Record<string, any> // eslint-disable-line @typescript-eslint/no-explicit-any
6 |
7 | function pickRootObjectProperty(
8 | fullSchema: JsonSchema,
9 | schemaName: string,
10 | ): {
11 | type: 'object'
12 | properties: any // eslint-disable-line @typescript-eslint/no-explicit-any
13 | required?: string[]
14 | } {
15 | const definitions = fullSchema.definitions ?? {}
16 | const root = definitions[schemaName]
17 | return {
18 | type: 'object',
19 | properties: root?.properties ?? {},
20 | required: root?.required ?? [],
21 | }
22 | }
23 |
24 | /**
25 | * Creates a tool definition object using the provided Zod schema.
26 | *
27 | * This function converts a Zod schema (acting as the single source of truth) into a JSON Schema,
28 | * extracts the relevant root object properties, and embeds them into the tool definition.
29 | * This approach avoids duplicate schema definitions and ensures type safety and consistency.
30 | *
31 | * Note: The provided name is also used as the tool's name in the Model Context Protocol.
32 | *
33 | * @param schema - The Zod schema representing the tool's parameters.
34 | * @param name - The tool's name in the Model Context Protocol, also used as the key for extracting the corresponding definition from the generated JSON Schema.
35 | * @param description - A brief description of the tool's functionality.
36 | * @returns A tool object containing the name, description, and input JSON Schema.
37 | */
38 | export function createToolSchema<T extends string>(
39 | schema: ZodSchema<any>, // eslint-disable-line @typescript-eslint/no-explicit-any
40 | name: T,
41 | description: string,
42 | ): Tool & { name: T } {
43 | return {
44 | name,
45 | description,
46 | inputSchema: pickRootObjectProperty(
47 | zodToJsonSchema(schema, { name }),
48 | name,
49 | ),
50 | }
51 | }
52 |
```
--------------------------------------------------------------------------------
/src/tools/traces/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { ListTracesZodSchema } from './schema'
5 |
6 | type TracesToolName = 'list_traces'
7 | type TracesTool = ExtendedTool<TracesToolName>
8 |
9 | export const TRACES_TOOLS: TracesTool[] = [
10 | createToolSchema(
11 | ListTracesZodSchema,
12 | 'list_traces',
13 | 'Get APM traces from Datadog',
14 | ),
15 | ] as const
16 |
17 | type TracesToolHandlers = ToolHandlers<TracesToolName>
18 |
19 | export const createTracesToolHandlers = (
20 | apiInstance: v2.SpansApi,
21 | ): TracesToolHandlers => {
22 | return {
23 | list_traces: async (request) => {
24 | const {
25 | query,
26 | from,
27 | to,
28 | limit = 100,
29 | sort = '-timestamp',
30 | service,
31 | operation,
32 | } = ListTracesZodSchema.parse(request.params.arguments)
33 |
34 | const response = await apiInstance.listSpans({
35 | body: {
36 | data: {
37 | attributes: {
38 | filter: {
39 | query: [
40 | query,
41 | ...(service ? [`service:${service}`] : []),
42 | ...(operation ? [`operation:${operation}`] : []),
43 | ].join(' '),
44 | from: new Date(from * 1000).toISOString(),
45 | to: new Date(to * 1000).toISOString(),
46 | },
47 | sort: sort as 'timestamp' | '-timestamp',
48 | page: { limit },
49 | },
50 | type: 'search_request',
51 | },
52 | },
53 | })
54 |
55 | if (!response.data) {
56 | throw new Error('No traces data returned')
57 | }
58 |
59 | return {
60 | content: [
61 | {
62 | type: 'text',
63 | text: `Traces: ${JSON.stringify({
64 | traces: response.data,
65 | count: response.data.length,
66 | })}`,
67 | },
68 | ],
69 | }
70 | },
71 | }
72 | }
73 |
```
--------------------------------------------------------------------------------
/src/tools/incident/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetIncidentZodSchema, ListIncidentsZodSchema } from './schema'
5 |
6 | type IncidentToolName = 'list_incidents' | 'get_incident'
7 | type IncidentTool = ExtendedTool<IncidentToolName>
8 |
9 | export const INCIDENT_TOOLS: IncidentTool[] = [
10 | createToolSchema(
11 | ListIncidentsZodSchema,
12 | 'list_incidents',
13 | 'Get incidents from Datadog',
14 | ),
15 | createToolSchema(
16 | GetIncidentZodSchema,
17 | 'get_incident',
18 | 'Get an incident from Datadog',
19 | ),
20 | ] as const
21 |
22 | type IncidentToolHandlers = ToolHandlers<IncidentToolName>
23 |
24 | export const createIncidentToolHandlers = (
25 | apiInstance: v2.IncidentsApi,
26 | ): IncidentToolHandlers => {
27 | return {
28 | list_incidents: async (request) => {
29 | const { pageSize, pageOffset } = ListIncidentsZodSchema.parse(
30 | request.params.arguments,
31 | )
32 |
33 | const response = await apiInstance.listIncidents({
34 | pageSize,
35 | pageOffset,
36 | })
37 |
38 | if (response.data == null) {
39 | throw new Error('No incidents data returned')
40 | }
41 |
42 | return {
43 | content: [
44 | {
45 | type: 'text',
46 | text: `Listed incidents:\n${response.data
47 | .map((d) => JSON.stringify(d))
48 | .join('\n')}`,
49 | },
50 | ],
51 | }
52 | },
53 | get_incident: async (request) => {
54 | const { incidentId } = GetIncidentZodSchema.parse(
55 | request.params.arguments,
56 | )
57 |
58 | const response = await apiInstance.getIncident({
59 | incidentId,
60 | })
61 |
62 | if (response.data == null) {
63 | throw new Error('No incident data returned')
64 | }
65 |
66 | return {
67 | content: [
68 | {
69 | type: 'text',
70 | text: `Incident: ${JSON.stringify(response.data)}`,
71 | },
72 | ],
73 | }
74 | },
75 | }
76 | }
77 |
```
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Check out repository code
14 | uses: actions/checkout@v4
15 |
16 | - name: Set up Node
17 | uses: actions/setup-node@v4
18 | with:
19 | node-version: 20
20 |
21 | - uses: pnpm/action-setup@v4
22 |
23 | - name: Install dependencies
24 | run: pnpm install --frozen-lockfile
25 |
26 | - name: Run ESLint
27 | run: pnpm run lint
28 |
29 | format:
30 | runs-on: ubuntu-latest
31 | steps:
32 | - name: Check out repository code
33 | uses: actions/checkout@v4
34 |
35 | - name: Set up Node
36 | uses: actions/setup-node@v4
37 | with:
38 | node-version: 20
39 |
40 | - uses: pnpm/action-setup@v4
41 |
42 | - name: Install dependencies
43 | run: pnpm install --frozen-lockfile
44 |
45 | - name: Check code format with Prettier
46 | run: pnpm exec prettier --check .
47 |
48 | build:
49 | runs-on: ubuntu-latest
50 | steps:
51 | - name: Check out repository code
52 | uses: actions/checkout@v4
53 |
54 | - name: Set up Node
55 | uses: actions/setup-node@v4
56 | with:
57 | node-version: 20
58 |
59 | - uses: pnpm/action-setup@v4
60 |
61 | - name: Install dependencies
62 | run: pnpm install --frozen-lockfile
63 |
64 | - name: Build
65 | run: pnpm run build
66 |
67 | test:
68 | permissions:
69 | contents: read
70 | pull-requests: write
71 | runs-on: ubuntu-latest
72 | steps:
73 | - name: Checkout
74 | uses: actions/checkout@v4
75 | with:
76 | fetch-depth: 0
77 |
78 | - name: Set up Node
79 | uses: actions/setup-node@v4
80 | with:
81 | node-version: 20
82 |
83 | - uses: pnpm/action-setup@v4
84 |
85 | - name: Install dependencies
86 | run: pnpm install --frozen-lockfile
87 |
88 | - name: Run tests
89 | run: pnpm test:coverage
90 |
91 | - name: Upload results to Codecov
92 | uses: codecov/codecov-action@v5
93 | with:
94 | token: ${{ secrets.CODECOV_TOKEN }}
95 | directory: coverage
96 |
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "@winor30/mcp-server-datadog",
3 | "version": "1.7.0",
4 | "description": "MCP server for interacting with Datadog API",
5 | "repository": {
6 | "type": "git",
7 | "url": "https://github.com/winor30/mcp-server-datadog.git"
8 | },
9 | "type": "module",
10 | "bin": {
11 | "mcp-server-datadog": "./build/index.js"
12 | },
13 | "main": "build/index.js",
14 | "module": "build/index.js",
15 | "types": "build/index.d.ts",
16 | "files": [
17 | "build",
18 | "README.md"
19 | ],
20 | "access": "public",
21 | "publishConfig": {
22 | "registry": "https://registry.npmjs.org",
23 | "access": "public"
24 | },
25 | "scripts": {
26 | "build": "tsup && node -e \"require('fs').chmodSync('build/index.js', '755')\"",
27 | "prepare": "husky",
28 | "watch": "tsup --watch",
29 | "inspector": "npx @modelcontextprotocol/inspector build/index.js",
30 | "lint": "eslint . --ext .ts,.js --fix",
31 | "format": "prettier --write .",
32 | "test": "vitest run",
33 | "test:coverage": "vitest run --coverage",
34 | "test:watch": "vitest",
35 | "lint-staged": "lint-staged"
36 | },
37 | "dependencies": {
38 | "@datadog/datadog-api-client": "^1.34.1",
39 | "@modelcontextprotocol/sdk": "0.6.0",
40 | "zod": "^3.24.3",
41 | "zod-to-json-schema": "^3.24.5"
42 | },
43 | "devDependencies": {
44 | "@eslint/eslintrc": "^3.3.1",
45 | "@eslint/js": "^9.25.0",
46 | "@types/jest": "^29.5.14",
47 | "@types/node": "^20.17.30",
48 | "@vitest/coverage-v8": "3.0.8",
49 | "eslint": "^9.25.0",
50 | "globals": "^16.0.0",
51 | "husky": "^9.1.7",
52 | "jest": "^29.7.0",
53 | "msw": "^2.7.5",
54 | "prettier": "^3.5.3",
55 | "ts-jest": "^29.3.2",
56 | "ts-node": "^10.9.2",
57 | "tsup": "^8.4.0",
58 | "typescript": "^5.8.3",
59 | "typescript-eslint": "^8.30.1",
60 | "vitest": "^3.1.4"
61 | },
62 | "engines": {
63 | "node": ">=20.x",
64 | "pnpm": ">=10"
65 | },
66 | "pnpm": {
67 | "overrides": {
68 | "vite": ">=6.3.4"
69 | }
70 | },
71 | "lint-staged": {
72 | "*.{js,ts}": [
73 | "eslint --fix",
74 | "prettier --write"
75 | ],
76 | "*.{json,md}": [
77 | "prettier --write"
78 | ]
79 | },
80 | "packageManager": "[email protected]"
81 | }
82 |
```
--------------------------------------------------------------------------------
/src/tools/dashboards/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetDashboardZodSchema, ListDashboardsZodSchema } from './schema'
5 |
6 | type DashboardsToolName = 'list_dashboards' | 'get_dashboard'
7 | type DashboardsTool = ExtendedTool<DashboardsToolName>
8 |
9 | export const DASHBOARDS_TOOLS: DashboardsTool[] = [
10 | createToolSchema(
11 | ListDashboardsZodSchema,
12 | 'list_dashboards',
13 | 'Get list of dashboards from Datadog',
14 | ),
15 | createToolSchema(
16 | GetDashboardZodSchema,
17 | 'get_dashboard',
18 | 'Get a dashboard from Datadog',
19 | ),
20 | ] as const
21 |
22 | type DashboardsToolHandlers = ToolHandlers<DashboardsToolName>
23 |
24 | export const createDashboardsToolHandlers = (
25 | apiInstance: v1.DashboardsApi,
26 | ): DashboardsToolHandlers => {
27 | return {
28 | list_dashboards: async (request) => {
29 | const { name, tags } = ListDashboardsZodSchema.parse(
30 | request.params.arguments,
31 | )
32 |
33 | const response = await apiInstance.listDashboards({
34 | filterShared: false,
35 | })
36 |
37 | if (!response.dashboards) {
38 | throw new Error('No dashboards data returned')
39 | }
40 |
41 | // Filter dashboards based on name and tags if provided
42 | let filteredDashboards = response.dashboards
43 | if (name) {
44 | const searchTerm = name.toLowerCase()
45 | filteredDashboards = filteredDashboards.filter((dashboard) =>
46 | dashboard.title?.toLowerCase().includes(searchTerm),
47 | )
48 | }
49 | if (tags && tags.length > 0) {
50 | filteredDashboards = filteredDashboards.filter((dashboard) => {
51 | const dashboardTags = dashboard.description?.split(',') || []
52 | return tags.every((tag) => dashboardTags.includes(tag))
53 | })
54 | }
55 |
56 | const dashboards = filteredDashboards.map((dashboard) => ({
57 | ...dashboard,
58 | url: `https://app.datadoghq.com/dashboard/${dashboard.id}`,
59 | }))
60 |
61 | return {
62 | content: [
63 | {
64 | type: 'text',
65 | text: `Dashboards: ${JSON.stringify(dashboards)}`,
66 | },
67 | ],
68 | }
69 | },
70 | get_dashboard: async (request) => {
71 | const { dashboardId } = GetDashboardZodSchema.parse(
72 | request.params.arguments,
73 | )
74 |
75 | const response = await apiInstance.getDashboard({
76 | dashboardId,
77 | })
78 |
79 | return {
80 | content: [
81 | {
82 | type: 'text',
83 | text: `Dashboard: ${JSON.stringify(response)}`,
84 | },
85 | ],
86 | }
87 | },
88 | }
89 | }
90 |
```
--------------------------------------------------------------------------------
/src/tools/logs/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetLogsZodSchema, GetAllServicesZodSchema } from './schema'
5 |
6 | type LogsToolName = 'get_logs' | 'get_all_services'
7 | type LogsTool = ExtendedTool<LogsToolName>
8 |
9 | export const LOGS_TOOLS: LogsTool[] = [
10 | createToolSchema(
11 | GetLogsZodSchema,
12 | 'get_logs',
13 | 'Search and retrieve logs from Datadog',
14 | ),
15 | createToolSchema(
16 | GetAllServicesZodSchema,
17 | 'get_all_services',
18 | 'Extract all unique service names from logs',
19 | ),
20 | ] as const
21 |
22 | type LogsToolHandlers = ToolHandlers<LogsToolName>
23 |
24 | export const createLogsToolHandlers = (
25 | apiInstance: v2.LogsApi,
26 | ): LogsToolHandlers => ({
27 | get_logs: async (request) => {
28 | const { query, from, to, limit } = GetLogsZodSchema.parse(
29 | request.params.arguments,
30 | )
31 |
32 | const response = await apiInstance.listLogs({
33 | body: {
34 | filter: {
35 | query,
36 | // `from` and `to` are in epoch seconds, but the Datadog API expects milliseconds
37 | from: `${from * 1000}`,
38 | to: `${to * 1000}`,
39 | },
40 | page: {
41 | limit,
42 | },
43 | sort: '-timestamp',
44 | },
45 | })
46 |
47 | if (response.data == null) {
48 | throw new Error('No logs data returned')
49 | }
50 |
51 | return {
52 | content: [
53 | {
54 | type: 'text',
55 | text: `Logs data: ${JSON.stringify(response.data)}`,
56 | },
57 | ],
58 | }
59 | },
60 |
61 | get_all_services: async (request) => {
62 | const { query, from, to, limit } = GetAllServicesZodSchema.parse(
63 | request.params.arguments,
64 | )
65 |
66 | const response = await apiInstance.listLogs({
67 | body: {
68 | filter: {
69 | query,
70 | // `from` and `to` are in epoch seconds, but the Datadog API expects milliseconds
71 | from: `${from * 1000}`,
72 | to: `${to * 1000}`,
73 | },
74 | page: {
75 | limit,
76 | },
77 | sort: '-timestamp',
78 | },
79 | })
80 |
81 | if (response.data == null) {
82 | throw new Error('No logs data returned')
83 | }
84 |
85 | // Extract unique services from logs
86 | const services = new Set<string>()
87 |
88 | for (const log of response.data) {
89 | // Access service attribute from logs based on the Datadog API structure
90 | if (log.attributes && log.attributes.service) {
91 | services.add(log.attributes.service)
92 | }
93 | }
94 |
95 | return {
96 | content: [
97 | {
98 | type: 'text',
99 | text: `Services: ${JSON.stringify(Array.from(services).sort())}`,
100 | },
101 | ],
102 | }
103 | },
104 | })
105 |
```
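
The only non-obvious part of the two handlers above is the time contract, sketched below (illustrative, not repository code): callers pass epoch seconds, and the handlers convert them to the millisecond strings the Logs API filter expects.

```typescript
// Illustrative only: get_logs and get_all_services take epoch *seconds*,
// and the handlers above convert them into the millisecond strings that
// the Datadog Logs API filter expects.
const to = Math.floor(Date.now() / 1000)
const from = to - 15 * 60 // last 15 minutes
const logsArguments = { query: 'service:web status:error', from, to, limit: 50 }

// Passed as request.params.arguments, this becomes filter.from = `${from * 1000}`
// and filter.to = `${to * 1000}` inside the handler.
console.log(logsArguments)
```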
--------------------------------------------------------------------------------
/src/tools/downtimes/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | ListDowntimesZodSchema,
6 | ScheduleDowntimeZodSchema,
7 | CancelDowntimeZodSchema,
8 | } from './schema'
9 |
10 | type DowntimesToolName =
11 | | 'list_downtimes'
12 | | 'schedule_downtime'
13 | | 'cancel_downtime'
14 | type DowntimesTool = ExtendedTool<DowntimesToolName>
15 |
16 | export const DOWNTIMES_TOOLS: DowntimesTool[] = [
17 | createToolSchema(
18 | ListDowntimesZodSchema,
19 | 'list_downtimes',
20 | 'List scheduled downtimes from Datadog',
21 | ),
22 | createToolSchema(
23 | ScheduleDowntimeZodSchema,
24 | 'schedule_downtime',
25 | 'Schedule a downtime in Datadog',
26 | ),
27 | createToolSchema(
28 | CancelDowntimeZodSchema,
29 | 'cancel_downtime',
30 | 'Cancel a scheduled downtime in Datadog',
31 | ),
32 | ] as const
33 |
34 | type DowntimesToolHandlers = ToolHandlers<DowntimesToolName>
35 |
36 | export const createDowntimesToolHandlers = (
37 | apiInstance: v1.DowntimesApi,
38 | ): DowntimesToolHandlers => {
39 | return {
40 | list_downtimes: async (request) => {
41 | const { currentOnly } = ListDowntimesZodSchema.parse(
42 | request.params.arguments,
43 | )
44 |
45 | const res = await apiInstance.listDowntimes({
46 | currentOnly,
47 | })
48 |
49 | return {
50 | content: [
51 | {
52 | type: 'text',
53 | text: `Listed downtimes:\n${JSON.stringify(res, null, 2)}`,
54 | },
55 | ],
56 | }
57 | },
58 |
59 | schedule_downtime: async (request) => {
60 | const params = ScheduleDowntimeZodSchema.parse(request.params.arguments)
61 |
62 | // Convert to the format expected by Datadog client
63 | const downtimeData: v1.Downtime = {
64 | scope: [params.scope],
65 | start: params.start,
66 | end: params.end,
67 | message: params.message,
68 | timezone: params.timezone,
69 | monitorId: params.monitorId,
70 | monitorTags: params.monitorTags,
71 | }
72 |
73 | // Add recurrence configuration if provided
74 | if (params.recurrence) {
75 | downtimeData.recurrence = {
76 | type: params.recurrence.type,
77 | period: params.recurrence.period,
78 | weekDays: params.recurrence.weekDays,
79 | }
80 | }
81 |
82 | const res = await apiInstance.createDowntime({
83 | body: downtimeData,
84 | })
85 |
86 | return {
87 | content: [
88 | {
89 | type: 'text',
90 | text: `Scheduled downtime: ${JSON.stringify(res, null, 2)}`,
91 | },
92 | ],
93 | }
94 | },
95 |
96 | cancel_downtime: async (request) => {
97 | const { downtimeId } = CancelDowntimeZodSchema.parse(
98 | request.params.arguments,
99 | )
100 |
101 | await apiInstance.cancelDowntime({
102 | downtimeId,
103 | })
104 |
105 | return {
106 | content: [
107 | {
108 | type: 'text',
109 | text: `Cancelled downtime with ID: ${downtimeId}`,
110 | },
111 | ],
112 | }
113 | },
114 | }
115 | }
116 |
```
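
As a rough illustration (not repository code), the object below shows the argument shape `schedule_downtime` consumes, reconstructed from the fields the handler reads; which fields are required is defined in `./schema.ts`, which is not shown on this page.

```typescript
// Illustrative only: the argument shape schedule_downtime consumes, based on
// the fields the handler above reads. Which fields are required is defined in
// ./schema.ts (not shown on this page), so treat the optional ones as tentative.
const start = Math.floor(Date.now() / 1000)
const scheduleArguments = {
  scope: 'env:staging',
  start,
  end: start + 2 * 60 * 60, // two hours later, epoch seconds
  message: 'Planned maintenance',
  recurrence: { type: 'weeks', period: 1, weekDays: ['Sat'] },
}
console.log(scheduleArguments)
```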
--------------------------------------------------------------------------------
/src/tools/monitors/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetMonitorsZodSchema } from './schema'
5 | import { unreachable } from '../../utils/helper'
6 | import { UnparsedObject } from '@datadog/datadog-api-client/dist/packages/datadog-api-client-common/util.js'
7 |
8 | type MonitorsToolName = 'get_monitors'
9 | type MonitorsTool = ExtendedTool<MonitorsToolName>
10 |
11 | export const MONITORS_TOOLS: MonitorsTool[] = [
12 | createToolSchema(
13 | GetMonitorsZodSchema,
14 | 'get_monitors',
15 | 'Get monitors status from Datadog',
16 | ),
17 | ] as const
18 |
19 | type MonitorsToolHandlers = ToolHandlers<MonitorsToolName>
20 |
21 | export const createMonitorsToolHandlers = (
22 | apiInstance: v1.MonitorsApi,
23 | ): MonitorsToolHandlers => {
24 | return {
25 | get_monitors: async (request) => {
26 | const { groupStates, name, tags } = GetMonitorsZodSchema.parse(
27 | request.params.arguments,
28 | )
29 |
30 | const response = await apiInstance.listMonitors({
31 | groupStates: groupStates?.join(','),
32 | name,
33 | tags: tags?.join(','),
34 | })
35 |
36 | if (response == null) {
37 | throw new Error('No monitors data returned')
38 | }
39 |
40 | const monitors = response.map((monitor) => ({
41 | name: monitor.name || '',
42 | id: monitor.id || 0,
43 | status: (monitor.overallState as string) || 'unknown',
44 | message: monitor.message,
45 | tags: monitor.tags || [],
46 | query: monitor.query || '',
47 | lastUpdatedTs: monitor.modified
48 | ? Math.floor(new Date(monitor.modified).getTime() / 1000)
49 | : undefined,
50 | }))
51 |
52 | // Calculate summary
53 | const summary = response.reduce(
54 | (acc, monitor) => {
55 | const status = monitor.overallState
56 | if (status == null || status instanceof UnparsedObject) {
57 | return acc
58 | }
59 |
60 | switch (status) {
61 | case 'Alert':
62 | acc.alert++
63 | break
64 | case 'Warn':
65 | acc.warn++
66 | break
67 | case 'No Data':
68 | acc.noData++
69 | break
70 | case 'OK':
71 | acc.ok++
72 | break
73 | case 'Ignored':
74 | acc.ignored++
75 | break
76 | case 'Skipped':
77 | acc.skipped++
78 | break
79 | case 'Unknown':
80 | acc.unknown++
81 | break
82 | default:
83 | unreachable(status)
84 | }
85 | return acc
86 | },
87 | {
88 | alert: 0,
89 | warn: 0,
90 | noData: 0,
91 | ok: 0,
92 | ignored: 0,
93 | skipped: 0,
94 | unknown: 0,
95 | },
96 | )
97 |
98 | return {
99 | content: [
100 | {
101 | type: 'text',
102 | text: `Monitors: ${JSON.stringify(monitors)}`,
103 | },
104 | {
105 | type: 'text',
106 | text: `Summary of monitors: ${JSON.stringify(summary)}`,
107 | },
108 | ],
109 | }
110 | },
111 | }
112 | }
113 |
```
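
The summary block above tallies monitors by `overallState`; the standalone sketch below (illustrative only) restates that reduce over a handful of sample states to show the shape of the resulting counts.

```typescript
// Illustrative only: the shape of the summary that get_monitors produces.
const states = ['Alert', 'OK', 'OK', 'Warn', 'No Data'] as const

const summary = states.reduce(
  (acc, state) => {
    switch (state) {
      case 'Alert':
        acc.alert++
        break
      case 'Warn':
        acc.warn++
        break
      case 'No Data':
        acc.noData++
        break
      case 'OK':
        acc.ok++
        break
    }
    return acc
  },
  { alert: 0, warn: 0, noData: 0, ok: 0, ignored: 0, skipped: 0, unknown: 0 },
)

console.log(summary)
// { alert: 1, warn: 1, noData: 1, ok: 2, ignored: 0, skipped: 0, unknown: 0 }
```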
--------------------------------------------------------------------------------
/src/tools/rum/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | /**
4 | * Schema for retrieving RUM events.
5 | * Defines parameters for querying RUM events within a time window.
6 | *
7 | * @param query - Datadog RUM query string
8 | * @param from - Start time in epoch seconds
9 | * @param to - End time in epoch seconds
10 | * @param limit - Maximum number of events to return (default: 100)
11 | */
12 | export const GetRumEventsZodSchema = z.object({
13 | query: z.string().default('').describe('Datadog RUM query string'),
14 | from: z.number().describe('Start time in epoch seconds'),
15 | to: z.number().describe('End time in epoch seconds'),
16 | limit: z
17 | .number()
18 | .optional()
19 | .default(100)
20 | .describe('Maximum number of events to return. Default is 100.'),
21 | })
22 |
23 | /**
24 | * Schema for retrieving RUM applications.
25 | * Returns a list of all RUM applications in the organization.
26 | */
27 | export const GetRumApplicationsZodSchema = z.object({})
28 |
29 | /**
30 | * Schema for retrieving unique user session counts.
31 | * Defines parameters for querying session counts within a time window.
32 | *
33 | * @param query - Optional. Additional query filter for RUM search. Defaults to "*" (all events)
34 | * @param from - Start time in epoch seconds
35 | * @param to - End time in epoch seconds
36 | * @param groupBy - Optional. Dimension to group results by (e.g., 'application.name')
37 | */
38 | export const GetRumGroupedEventCountZodSchema = z.object({
39 | query: z
40 | .string()
41 | .default('*')
42 | .describe('Optional query filter for RUM search'),
43 | from: z.number().describe('Start time in epoch seconds'),
44 | to: z.number().describe('End time in epoch seconds'),
45 | groupBy: z
46 | .string()
47 | .optional()
48 | .default('application.name')
49 | .describe('Dimension to group results by. Default is application.name'),
50 | })
51 |
52 | /**
53 | * Schema for retrieving page performance metrics.
54 | * Defines parameters for querying performance metrics within a time window.
55 | *
56 | * @param query - Optional. Additional query filter for RUM search. Defaults to "*" (all events)
57 | * @param from - Start time in epoch seconds
58 | * @param to - End time in epoch seconds
59 | * @param metricNames - Array of metric names to retrieve (e.g., 'view.load_time', 'view.first_contentful_paint')
60 | */
61 | export const GetRumPagePerformanceZodSchema = z.object({
62 | query: z
63 | .string()
64 | .default('*')
65 | .describe('Optional query filter for RUM search'),
66 | from: z.number().describe('Start time in epoch seconds'),
67 | to: z.number().describe('End time in epoch seconds'),
68 | metricNames: z
69 | .array(z.string())
70 | .default([
71 | 'view.load_time',
72 | 'view.first_contentful_paint',
73 | 'view.largest_contentful_paint',
74 | ])
75 | .describe('Array of metric names to retrieve'),
76 | })
77 |
78 | /**
79 | * Schema for retrieving RUM page waterfall data.
80 |  * Defines parameters for locating the page views of a single session.
81 |  *
82 |  * @param applicationName - Application name to filter events
83 |  * @param sessionId - Session ID to filter events
84 |  *
85 |  * Note: unlike the other RUM schemas, this one takes no explicit time window.
86 |  */
87 | export const GetRumPageWaterfallZodSchema = z.object({
88 | applicationName: z.string().describe('Application name to filter events'),
89 | sessionId: z.string().describe('Session ID to filter events'),
90 | })
91 |
```
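
A small illustrative check (not repository code) of how these schemas behave: omitted fields fall back to the defaults declared above.

```typescript
// Illustrative only. Path assumes the snippet sits next to src/tools/rum/schema.ts.
import { GetRumEventsZodSchema, GetRumGroupedEventCountZodSchema } from './schema'

const events = GetRumEventsZodSchema.parse({ from: 1740000000, to: 1740003600 })
console.log(events.query, events.limit) // '' 100 (defaults applied)

const grouped = GetRumGroupedEventCountZodSchema.parse({
  from: 1740000000,
  to: 1740003600,
})
console.log(grouped.query, grouped.groupBy) // '*' 'application.name'
```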
--------------------------------------------------------------------------------
/src/tools/hosts/schema.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod'
2 |
3 | /**
4 | * Zod schemas for validating input parameters for Datadog host management operations.
5 | * These schemas define the expected shape and types of data for each host-related tool.
6 | */
7 |
8 | /**
9 | * Schema for muting a host in Datadog.
10 | * Defines required and optional parameters for temporarily silencing a host's alerts.
11 | *
12 | * @param hostname - Required. Identifies the host to be muted
13 | * @param message - Optional. Adds context about why the host is being muted
14 | * @param end - Optional. Unix timestamp defining when the mute should automatically expire
15 | * @param override - Optional. Controls whether to replace an existing mute's end time
16 | */
17 | export const MuteHostZodSchema = z.object({
18 | hostname: z.string().describe('The name of the host to mute'),
19 | message: z
20 | .string()
21 | .optional()
22 | .describe('Message to associate with the muting of this host'),
23 | end: z
24 | .number()
25 | .int()
26 | .optional()
27 | .describe('POSIX timestamp for when the mute should end'),
28 | override: z
29 | .boolean()
30 | .optional()
31 | .default(false)
32 | .describe(
33 | 'If true and the host is already muted, replaces existing end time',
34 | ),
35 | })
36 |
37 | /**
38 | * Schema for unmuting a host in Datadog.
39 | * Defines parameters for re-enabling alerts for a previously muted host.
40 | *
41 | * @param hostname - Required. Identifies the host to be unmuted
42 | */
43 | export const UnmuteHostZodSchema = z.object({
44 | hostname: z.string().describe('The name of the host to unmute'),
45 | })
46 |
47 | /**
48 | * Schema for retrieving active host counts from Datadog.
49 | * Defines parameters for querying the number of reporting hosts within a time window.
50 | *
51 | * @param from - Optional. Time window in seconds to check for host activity
52 | * Defaults to 7200 seconds (2 hours)
53 | */
54 | export const GetActiveHostsCountZodSchema = z.object({
55 | from: z
56 | .number()
57 | .int()
58 | .optional()
59 | .default(7200)
60 | .describe(
61 | 'Number of seconds from which you want to get total number of active hosts (defaults to 2h)',
62 | ),
63 | })
64 |
65 | /**
66 | * Schema for listing and filtering hosts in Datadog.
67 | * Defines comprehensive parameters for querying and filtering host information.
68 | *
69 | * @param filter - Optional. Search string to filter hosts
70 | * @param sort_field - Optional. Field to sort results by
71 | * @param sort_dir - Optional. Sort direction ('asc' or 'desc')
72 | * @param start - Optional. Pagination offset
73 | * @param count - Optional. Number of hosts to return (max 1000)
74 | * @param from - Optional. Unix timestamp to start searching from
75 | * @param include_muted_hosts_data - Optional. Include muting information
76 | * @param include_hosts_metadata - Optional. Include detailed host metadata
77 | */
78 | export const ListHostsZodSchema = z.object({
79 | filter: z.string().optional().describe('Filter string for search results'),
80 | sort_field: z.string().optional().describe('Field to sort hosts by'),
81 | sort_dir: z.string().optional().describe('Sort direction (asc/desc)'),
82 | start: z.number().int().optional().describe('Starting offset for pagination'),
83 | count: z
84 | .number()
85 | .int()
86 | .max(1000)
87 | .optional()
88 | .describe('Max number of hosts to return (max: 1000)'),
89 | from: z
90 | .number()
91 | .int()
92 | .optional()
93 | .describe('Search hosts from this UNIX timestamp'),
94 | include_muted_hosts_data: z
95 | .boolean()
96 | .optional()
97 | .describe('Include muted hosts status and expiry'),
98 | include_hosts_metadata: z
99 | .boolean()
100 | .optional()
101 | .describe('Include host metadata (version, platform, etc)'),
102 | })
103 |
```
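
A short illustrative check (not repository code) of the validation rules above: `override` defaults to `false`, and `count` is rejected above 1000.

```typescript
// Illustrative only. Path assumes the snippet sits next to src/tools/hosts/schema.ts.
import { MuteHostZodSchema, ListHostsZodSchema } from './schema'

// `override` falls back to false when omitted.
const mute = MuteHostZodSchema.parse({ hostname: 'web-01' })
console.log(mute.override) // false

// `count` is capped at 1000, so this fails validation.
const result = ListHostsZodSchema.safeParse({ count: 5000 })
console.log(result.success) // false
```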
--------------------------------------------------------------------------------
/tests/utils/datadog.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect } from 'vitest'
2 | import {
3 | ApiKeyAuthAuthentication,
4 | AppKeyAuthAuthentication,
5 | } from '@datadog/datadog-api-client/dist/packages/datadog-api-client-common'
6 | import { createDatadogConfig, getDatadogSite } from '../../src/utils/datadog'
7 |
8 | describe('createDatadogConfig', () => {
9 | it('should create a datadog config with custom site when DATADOG_SITE is configured', () => {
10 | const datadogConfig = createDatadogConfig({
11 | apiKeyAuth: 'test-api-key',
12 | appKeyAuth: 'test-app-key',
13 | site: 'us3.datadoghq.com',
14 | })
15 | expect(datadogConfig.authMethods).toEqual({
16 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
17 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
18 | })
19 | expect(datadogConfig.servers[0]?.getConfiguration()?.site).toBe(
20 | 'us3.datadoghq.com',
21 | )
22 | })
23 |
24 | it('should create a datadog config with default site when DATADOG_SITE is not configured', () => {
25 | const datadogConfig = createDatadogConfig({
26 | apiKeyAuth: 'test-api-key',
27 | appKeyAuth: 'test-app-key',
28 | })
29 | expect(datadogConfig.authMethods).toEqual({
30 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
31 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
32 | })
33 | expect(datadogConfig.servers[0]?.getConfiguration()?.site).toBe(
34 | 'datadoghq.com',
35 | )
36 | })
37 | })
38 |
39 | describe('createDatadogConfig', () => {
40 | it('should create a datadog config with custom subdomain when DATADOG_SUBDOMAIN is configured', () => {
41 | const datadogConfig = createDatadogConfig({
42 | apiKeyAuth: 'test-api-key',
43 | appKeyAuth: 'test-app-key',
44 |       subdomain: 'your-subdomain',
45 | })
46 | expect(datadogConfig.authMethods).toEqual({
47 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
48 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
49 | })
50 | expect(datadogConfig.servers[0]?.getConfiguration()?.subdomain).toBe(
51 |       'your-subdomain',
52 | )
53 | })
54 |
55 | it('should create a datadog config with default subdomain when DATADOG_SUBDOMAIN is not configured', () => {
56 | const datadogConfig = createDatadogConfig({
57 | apiKeyAuth: 'test-api-key',
58 | appKeyAuth: 'test-app-key',
59 | })
60 | expect(datadogConfig.authMethods).toEqual({
61 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
62 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
63 | })
64 | expect(datadogConfig.servers[0]?.getConfiguration()?.subdomain).toBe('api')
65 | })
66 |
67 |   it('should throw an error when DATADOG_API_KEY is not configured', () => {
68 | expect(() =>
69 | createDatadogConfig({
70 | apiKeyAuth: '',
71 | appKeyAuth: 'test-app-key',
72 | }),
73 | ).toThrow('Datadog API key and APP key are required')
74 | })
75 |
76 |   it('should throw an error when DATADOG_APP_KEY is not configured', () => {
77 | expect(() =>
78 | createDatadogConfig({
79 | apiKeyAuth: 'test-api-key',
80 | appKeyAuth: '',
81 | }),
82 | ).toThrow('Datadog API key and APP key are required')
83 | })
84 | })
85 |
86 | describe('getDatadogSite', () => {
87 | it('should return custom site when DATADOG_SITE is configured', () => {
88 | const datadogConfig = createDatadogConfig({
89 | apiKeyAuth: 'test-api-key',
90 | appKeyAuth: 'test-app-key',
91 | site: 'us3.datadoghq.com',
92 | })
93 | const site = getDatadogSite(datadogConfig)
94 | expect(site).toBe('us3.datadoghq.com')
95 | })
96 |
97 | it('should return default site when DATADOG_SITE is not configured', () => {
98 | const datadogConfig = createDatadogConfig({
99 | apiKeyAuth: 'test-api-key',
100 | appKeyAuth: 'test-app-key',
101 | })
102 | const site = getDatadogSite(datadogConfig)
103 | expect(site).toBe('datadoghq.com')
104 | })
105 | })
106 |
```
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env node
2 |
3 | /**
4 | * This script sets up the mcp-server-datadog.
5 |  * It initializes an MCP server that integrates with the Datadog APIs.
6 |  * Through MCP, the server exposes tools for incidents, metrics, logs, monitors,
7 |  * dashboards, traces, hosts, downtimes, and RUM data.
8 | */
9 |
10 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'
11 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
12 | import {
13 | CallToolRequestSchema,
14 | ListToolsRequestSchema,
15 | } from '@modelcontextprotocol/sdk/types.js'
16 | import { log, mcpDatadogVersion } from './utils/helper'
17 | import { INCIDENT_TOOLS, createIncidentToolHandlers } from './tools/incident'
18 | import { METRICS_TOOLS, createMetricsToolHandlers } from './tools/metrics'
19 | import { LOGS_TOOLS, createLogsToolHandlers } from './tools/logs'
20 | import { MONITORS_TOOLS, createMonitorsToolHandlers } from './tools/monitors'
21 | import {
22 | DASHBOARDS_TOOLS,
23 | createDashboardsToolHandlers,
24 | } from './tools/dashboards'
25 | import { TRACES_TOOLS, createTracesToolHandlers } from './tools/traces'
26 | import { HOSTS_TOOLS, createHostsToolHandlers } from './tools/hosts'
27 | import { ToolHandlers } from './utils/types'
28 | import { createDatadogConfig } from './utils/datadog'
29 | import { createDowntimesToolHandlers, DOWNTIMES_TOOLS } from './tools/downtimes'
30 | import { createRumToolHandlers, RUM_TOOLS } from './tools/rum'
31 | import { v2, v1 } from '@datadog/datadog-api-client'
32 |
33 | const server = new Server(
34 | {
35 | name: 'Datadog MCP Server',
36 | version: mcpDatadogVersion,
37 | },
38 | {
39 | capabilities: {
40 | tools: {},
41 | },
42 | },
43 | )
44 |
45 | server.onerror = (error) => {
46 | log('error', `Server error: ${error.message}`, error.stack)
47 | }
48 |
49 | /**
50 | * Handler that retrieves the list of available tools in the mcp-server-datadog.
51 |  * It advertises every registered tool group: incidents, metrics, logs, monitors, dashboards, traces, hosts, downtimes, and RUM.
52 | */
53 | server.setRequestHandler(ListToolsRequestSchema, async () => {
54 | return {
55 | tools: [
56 | ...INCIDENT_TOOLS,
57 | ...METRICS_TOOLS,
58 | ...LOGS_TOOLS,
59 | ...MONITORS_TOOLS,
60 | ...DASHBOARDS_TOOLS,
61 | ...TRACES_TOOLS,
62 | ...HOSTS_TOOLS,
63 | ...DOWNTIMES_TOOLS,
64 | ...RUM_TOOLS,
65 | ],
66 | }
67 | })
68 |
69 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
70 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
71 | }
72 |
73 | const datadogConfig = createDatadogConfig({
74 | apiKeyAuth: process.env.DATADOG_API_KEY,
75 | appKeyAuth: process.env.DATADOG_APP_KEY,
76 | site: process.env.DATADOG_SITE,
77 | subdomain: process.env.DATADOG_SUBDOMAIN,
78 | })
79 |
80 | const TOOL_HANDLERS: ToolHandlers = {
81 | ...createIncidentToolHandlers(new v2.IncidentsApi(datadogConfig)),
82 | ...createMetricsToolHandlers(new v1.MetricsApi(datadogConfig)),
83 | ...createLogsToolHandlers(new v2.LogsApi(datadogConfig)),
84 | ...createMonitorsToolHandlers(new v1.MonitorsApi(datadogConfig)),
85 | ...createDashboardsToolHandlers(new v1.DashboardsApi(datadogConfig)),
86 | ...createTracesToolHandlers(new v2.SpansApi(datadogConfig)),
87 | ...createHostsToolHandlers(new v1.HostsApi(datadogConfig)),
88 | ...createDowntimesToolHandlers(new v1.DowntimesApi(datadogConfig)),
89 | ...createRumToolHandlers(new v2.RUMApi(datadogConfig)),
90 | }
91 | /**
92 | * Handler for invoking Datadog-related tools in the mcp-server-datadog.
93 | * The TOOL_HANDLERS object contains various tools that interact with different Datadog APIs.
94 | * By specifying the tool name in the request, the LLM can select and utilize the required tool.
95 | */
96 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
97 | try {
98 | if (TOOL_HANDLERS[request.params.name]) {
99 | return await TOOL_HANDLERS[request.params.name](request)
100 | }
101 | throw new Error('Unknown tool')
102 | } catch (unknownError) {
103 | const error =
104 | unknownError instanceof Error
105 | ? unknownError
106 | : new Error(String(unknownError))
107 | log(
108 | 'error',
109 | `Request: ${request.params.name}, ${JSON.stringify(request.params.arguments)} failed`,
110 | error.message,
111 | error.stack,
112 | )
113 | throw error
114 | }
115 | })
116 |
117 | /**
118 | * Initializes and starts the mcp-server-datadog using stdio transport,
119 | * which sends and receives data through standard input and output.
120 | */
121 | async function main() {
122 | const transport = new StdioServerTransport()
123 | await server.connect(transport)
124 | }
125 |
126 | main().catch((error) => {
127 | log('error', 'Server error:', error)
128 | process.exit(1)
129 | })
130 |
```
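
For context, a hedged sketch of driving this server from an MCP client over stdio; it assumes the SDK's client helpers (`Client`, `StdioClientTransport`, `listTools`, `callTool`) and a built entry point at `build/index.js`, both of which depend on your local setup rather than anything shown on this page.

```typescript
// Illustrative only: exercising the server above from an MCP client over stdio.
// The client API names and the build/index.js path are assumptions; adjust to
// the SDK version and build output you actually use.
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'

const transport = new StdioClientTransport({
  command: 'node',
  args: ['build/index.js'],
  env: {
    DATADOG_API_KEY: process.env.DATADOG_API_KEY ?? '',
    DATADOG_APP_KEY: process.env.DATADOG_APP_KEY ?? '',
  },
})

const client = new Client(
  { name: 'example-client', version: '0.0.1' },
  { capabilities: {} },
)
await client.connect(transport)

console.log(await client.listTools())
console.log(
  await client.callTool({ name: 'get_monitors', arguments: { groupStates: ['alert'] } }),
)
```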
--------------------------------------------------------------------------------
/src/tools/hosts/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | ListHostsZodSchema,
6 | GetActiveHostsCountZodSchema,
7 | MuteHostZodSchema,
8 | UnmuteHostZodSchema,
9 | } from './schema'
10 |
11 | /**
12 | * This module implements Datadog host management tools for muting, unmuting,
13 | * and retrieving host information using the Datadog API client.
14 | */
15 |
16 | /** Available host management tool names */
17 | type HostsToolName =
18 | | 'list_hosts'
19 | | 'get_active_hosts_count'
20 | | 'mute_host'
21 | | 'unmute_host'
22 | /** Extended tool type with host-specific operations */
23 | type HostsTool = ExtendedTool<HostsToolName>
24 |
25 | /**
26 | * Array of available host management tools.
27 | * Each tool is created with a schema for input validation and includes a description.
28 | */
29 | export const HOSTS_TOOLS: HostsTool[] = [
30 | createToolSchema(MuteHostZodSchema, 'mute_host', 'Mute a host in Datadog'),
31 | createToolSchema(
32 | UnmuteHostZodSchema,
33 | 'unmute_host',
34 | 'Unmute a host in Datadog',
35 | ),
36 | createToolSchema(
37 | ListHostsZodSchema,
38 | 'list_hosts',
39 | 'Get list of hosts from Datadog',
40 | ),
41 | createToolSchema(
42 | GetActiveHostsCountZodSchema,
43 | 'get_active_hosts_count',
44 |     'Get the total number of active hosts in Datadog (defaults to the last 2 hours)',
45 | ),
46 | ] as const
47 |
48 | /** Type definition for host management tool implementations */
49 | type HostsToolHandlers = ToolHandlers<HostsToolName>
50 |
51 | /**
52 | * Implementation of host management tool handlers.
53 | * Each handler validates inputs using Zod schemas and interacts with the Datadog API.
54 | */
55 | export const createHostsToolHandlers = (
56 | apiInstance: v1.HostsApi,
57 | ): HostsToolHandlers => {
58 | return {
59 | /**
60 | * Mutes a specified host in Datadog.
61 | * Silences alerts and notifications for the host until unmuted or until the specified end time.
62 | */
63 | mute_host: async (request) => {
64 | const { hostname, message, end, override } = MuteHostZodSchema.parse(
65 | request.params.arguments,
66 | )
67 |
68 | await apiInstance.muteHost({
69 | hostName: hostname,
70 | body: {
71 | message,
72 | end,
73 | override,
74 | },
75 | })
76 |
77 | return {
78 | content: [
79 | {
80 | type: 'text',
81 | text: JSON.stringify(
82 | {
83 | status: 'success',
84 | message: `Host ${hostname} has been muted successfully${message ? ` with message: ${message}` : ''}${end ? ` until ${new Date(end * 1000).toISOString()}` : ''}`,
85 | },
86 | null,
87 | 2,
88 | ),
89 | },
90 | ],
91 | }
92 | },
93 |
94 | /**
95 | * Unmutes a previously muted host in Datadog.
96 | * Re-enables alerts and notifications for the specified host.
97 | */
98 | unmute_host: async (request) => {
99 | const { hostname } = UnmuteHostZodSchema.parse(request.params.arguments)
100 |
101 | await apiInstance.unmuteHost({
102 | hostName: hostname,
103 | })
104 |
105 | return {
106 | content: [
107 | {
108 | type: 'text',
109 | text: JSON.stringify(
110 | {
111 | status: 'success',
112 | message: `Host ${hostname} has been unmuted successfully`,
113 | },
114 | null,
115 | 2,
116 | ),
117 | },
118 | ],
119 | }
120 | },
121 |
122 | /**
123 | * Retrieves counts of active and up hosts in Datadog.
124 | * Provides total counts of hosts that are reporting and operational.
125 | */
126 | get_active_hosts_count: async (request) => {
127 | const { from } = GetActiveHostsCountZodSchema.parse(
128 | request.params.arguments,
129 | )
130 |
131 | const response = await apiInstance.getHostTotals({
132 | from,
133 | })
134 |
135 | return {
136 | content: [
137 | {
138 | type: 'text',
139 | text: JSON.stringify(
140 | {
141 | total_active: response.totalActive || 0, // Total number of active hosts (UP and reporting) to Datadog
142 | total_up: response.totalUp || 0, // Number of hosts that are UP and reporting to Datadog
143 | },
144 | null,
145 | 2,
146 | ),
147 | },
148 | ],
149 | }
150 | },
151 |
152 | /**
153 | * Lists and filters hosts monitored by Datadog.
154 | * Supports comprehensive querying with filtering, sorting, and pagination.
155 | * Returns detailed host information including status, metadata, and monitoring data.
156 | */
157 | list_hosts: async (request) => {
158 | const {
159 | filter,
160 | sort_field,
161 | sort_dir,
162 | start,
163 | count,
164 | from,
165 | include_muted_hosts_data,
166 | include_hosts_metadata,
167 | } = ListHostsZodSchema.parse(request.params.arguments)
168 |
169 | const response = await apiInstance.listHosts({
170 | filter,
171 | sortField: sort_field,
172 | sortDir: sort_dir,
173 | start,
174 | count,
175 | from,
176 | includeMutedHostsData: include_muted_hosts_data,
177 | includeHostsMetadata: include_hosts_metadata,
178 | })
179 |
180 | if (!response.hostList) {
181 | throw new Error('No hosts data returned')
182 | }
183 |
184 | // Transform API response into a more convenient format
185 | const hosts = response.hostList.map((host) => ({
186 | name: host.name,
187 | id: host.id,
188 | aliases: host.aliases,
189 | apps: host.apps,
190 | mute: host.isMuted,
191 | last_reported: host.lastReportedTime,
192 | meta: host.meta,
193 | metrics: host.metrics,
194 | sources: host.sources,
195 | up: host.up,
196 | url: `https://app.datadoghq.com/infrastructure?host=${host.name}`,
197 | }))
198 |
199 | return {
200 | content: [
201 | {
202 | type: 'text',
203 | text: `Hosts: ${JSON.stringify(hosts)}`,
204 | },
205 | ],
206 | }
207 | },
208 | }
209 | }
210 |
```
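
A minimal sketch (not repository code) of calling `mute_host` directly, following the same wiring and loose request casting as the dashboards sketch earlier on this page.

```typescript
// Illustrative only. Paths assume the snippet sits next to src/tools/hosts/tool.ts,
// and the Datadog keys are taken from the environment.
import { v1 } from '@datadog/datadog-api-client'
import { createDatadogConfig } from '../../utils/datadog'
import { createHostsToolHandlers } from './tool'

const config = createDatadogConfig({
  apiKeyAuth: process.env.DATADOG_API_KEY ?? '',
  appKeyAuth: process.env.DATADOG_APP_KEY ?? '',
})
const handlers = createHostsToolHandlers(new v1.HostsApi(config))

const muteRequest = {
  method: 'tools/call',
  params: {
    name: 'mute_host',
    arguments: {
      hostname: 'web-01',
      message: 'maintenance window',
      end: Math.floor(Date.now() / 1000) + 3600, // auto-unmute after one hour
    },
  },
} as unknown as Parameters<typeof handlers.mute_host>[0]

handlers.mute_host(muteRequest).then((result) => console.log(result))
```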
--------------------------------------------------------------------------------
/tests/tools/dashboards.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createDashboardsToolHandlers } from '../../src/tools/dashboards/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const dashboardEndpoint = `${baseUrl}/v1/dashboard`
11 |
12 | describe('Dashboards Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v1.DashboardsApi(datadogConfig)
24 | const toolHandlers = createDashboardsToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/dashboards/#get-all-dashboards
27 | describe.concurrent('list_dashboards', async () => {
28 | it('should list dashboards', async () => {
29 | const mockHandler = http.get(dashboardEndpoint, async () => {
30 | return HttpResponse.json({
31 | dashboards: [
32 | {
33 | id: 'q5j-nti-fv6',
34 | type: 'host_timeboard',
35 | },
36 | ],
37 | })
38 | })
39 |
40 | const server = setupServer(mockHandler)
41 |
42 | await server.boundary(async () => {
43 | const request = createMockToolRequest('list_dashboards', {
44 | name: 'test name',
45 | tags: ['test_tag'],
46 | })
47 | const response = (await toolHandlers.list_dashboards(
48 | request,
49 | )) as unknown as DatadogToolResponse
50 | expect(response.content[0].text).toContain('Dashboards')
51 | })()
52 |
53 | server.close()
54 | })
55 |
56 | it('should handle authentication errors', async () => {
57 | const mockHandler = http.get(dashboardEndpoint, async () => {
58 | return HttpResponse.json(
59 | { errors: ['dummy authentication error'] },
60 | { status: 403 },
61 | )
62 | })
63 |
64 | const server = setupServer(mockHandler)
65 |
66 | await server.boundary(async () => {
67 | const request = createMockToolRequest('list_dashboards', {
68 | name: 'test',
69 | })
70 | await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
71 | 'dummy authentication error',
72 | )
73 | })()
74 |
75 | server.close()
76 | })
77 |
78 | it('should handle too many requests', async () => {
79 | const mockHandler = http.get(dashboardEndpoint, async () => {
80 | return HttpResponse.json(
81 | { errors: ['dummy too many requests'] },
82 | { status: 429 },
83 | )
84 | })
85 |
86 | const server = setupServer(mockHandler)
87 |
88 | await server.boundary(async () => {
89 | const request = createMockToolRequest('list_dashboards', {
90 | name: 'test',
91 | })
92 | await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
93 | 'dummy too many requests',
94 | )
95 | })()
96 |
97 | server.close()
98 | })
99 |
100 | it('should handle unknown errors', async () => {
101 | const mockHandler = http.get(dashboardEndpoint, async () => {
102 | return HttpResponse.json(
103 | { errors: ['dummy unknown error'] },
104 | { status: 500 },
105 | )
106 | })
107 |
108 | const server = setupServer(mockHandler)
109 |
110 | await server.boundary(async () => {
111 | const request = createMockToolRequest('list_dashboards', {
112 | name: 'test',
113 | })
114 | await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
115 | 'dummy unknown error',
116 | )
117 | })()
118 |
119 | server.close()
120 | })
121 | })
122 |
123 | // https://docs.datadoghq.com/ja/api/latest/dashboards/#get-a-dashboard
124 | describe.concurrent('get_dashboard', async () => {
125 | it('should get a dashboard', async () => {
126 | const dashboardId = '123456789'
127 | const mockHandler = http.get(
128 | `${dashboardEndpoint}/${dashboardId}`,
129 | async () => {
130 | return HttpResponse.json({
131 | id: '123456789',
132 | title: 'Dashboard',
133 | layout_type: 'ordered',
134 | widgets: [],
135 | })
136 | },
137 | )
138 |
139 | const server = setupServer(mockHandler)
140 |
141 | await server.boundary(async () => {
142 | const request = createMockToolRequest('get_dashboard', {
143 | dashboardId,
144 | })
145 | const response = (await toolHandlers.get_dashboard(
146 | request,
147 | )) as unknown as DatadogToolResponse
148 |
149 | expect(response.content[0].text).toContain('123456789')
150 | expect(response.content[0].text).toContain('Dashboard')
151 | expect(response.content[0].text).toContain('ordered')
152 | })()
153 |
154 | server.close()
155 | })
156 |
157 | it('should handle not found errors', async () => {
158 | const dashboardId = '999999999'
159 | const mockHandler = http.get(
160 | `${dashboardEndpoint}/${dashboardId}`,
161 | async () => {
162 | return HttpResponse.json({ errors: ['Not found'] }, { status: 404 })
163 | },
164 | )
165 |
166 | const server = setupServer(mockHandler)
167 |
168 | await server.boundary(async () => {
169 | const request = createMockToolRequest('get_dashboard', {
170 | dashboardId,
171 | })
172 | await expect(toolHandlers.get_dashboard(request)).rejects.toThrow(
173 | 'Not found',
174 | )
175 | })()
176 |
177 | server.close()
178 | })
179 |
180 | it('should handle server errors', async () => {
181 | const dashboardId = '123456789'
182 | const mockHandler = http.get(
183 | `${dashboardEndpoint}/${dashboardId}`,
184 | async () => {
185 | return HttpResponse.json(
186 | { errors: ['Internal server error'] },
187 | { status: 500 },
188 | )
189 | },
190 | )
191 |
192 | const server = setupServer(mockHandler)
193 |
194 | await server.boundary(async () => {
195 | const request = createMockToolRequest('get_dashboard', {
196 | dashboardId,
197 | })
198 | await expect(toolHandlers.get_dashboard(request)).rejects.toThrow(
199 | 'Internal server error',
200 | )
201 | })()
202 |
203 | server.close()
204 | })
205 | })
206 | })
207 |
```
--------------------------------------------------------------------------------
/tests/tools/metrics.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createMetricsToolHandlers } from '../../src/tools/metrics/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const metricsEndpoint = `${baseUrl}/v1/query`
11 |
12 | describe('Metrics Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v1.MetricsApi(datadogConfig)
24 | const toolHandlers = createMetricsToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/metrics/#query-timeseries-data-across-multiple-products
27 | describe.concurrent('query_metrics', async () => {
28 | it('should query metrics data', async () => {
29 | const mockHandler = http.get(metricsEndpoint, async () => {
30 | return HttpResponse.json({
31 | status: 'ok',
32 | query: 'avg:system.cpu.user{*}',
33 | series: [
34 | {
35 | metric: 'system.cpu.user',
36 | display_name: 'system.cpu.user',
37 | pointlist: [
38 | [1640995000000, 23.45],
39 | [1640995060000, 24.12],
40 | [1640995120000, 22.89],
41 | [1640995180000, 25.67],
42 | ],
43 | scope: 'host:web-01',
44 | expression: 'avg:system.cpu.user{*}',
45 | unit: [
46 | {
47 | family: 'percentage',
48 | scale_factor: 1,
49 | name: 'percent',
50 | short_name: '%',
51 | },
52 | ],
53 | },
54 | {
55 | metric: 'system.cpu.user',
56 | display_name: 'system.cpu.user',
57 | pointlist: [
58 | [1640995000000, 18.32],
59 | [1640995060000, 19.01],
60 | [1640995120000, 17.76],
61 | [1640995180000, 20.45],
62 | ],
63 | scope: 'host:web-02',
64 | expression: 'avg:system.cpu.user{*}',
65 | unit: [
66 | {
67 | family: 'percentage',
68 | scale_factor: 1,
69 | name: 'percent',
70 | short_name: '%',
71 | },
72 | ],
73 | },
74 | ],
75 | from_date: 1640995000000,
76 | to_date: 1641095000000,
77 | group_by: ['host'],
78 | })
79 | })
80 |
81 | const server = setupServer(mockHandler)
82 |
83 | await server.boundary(async () => {
84 | const request = createMockToolRequest('query_metrics', {
85 | from: 1640995000,
86 | to: 1641095000,
87 | query: 'avg:system.cpu.user{*}',
88 | })
89 | const response = (await toolHandlers.query_metrics(
90 | request,
91 | )) as unknown as DatadogToolResponse
92 |
93 | expect(response.content[0].text).toContain('Queried metrics data:')
94 | expect(response.content[0].text).toContain('system.cpu.user')
95 | expect(response.content[0].text).toContain('host:web-01')
96 | expect(response.content[0].text).toContain('host:web-02')
97 | expect(response.content[0].text).toContain('23.45')
98 | })()
99 |
100 | server.close()
101 | })
102 |
103 | it('should handle empty response', async () => {
104 | const mockHandler = http.get(metricsEndpoint, async () => {
105 | return HttpResponse.json({
106 | status: 'ok',
107 | query: 'avg:non.existent.metric{*}',
108 | series: [],
109 | from_date: 1640995000000,
110 | to_date: 1641095000000,
111 | })
112 | })
113 |
114 | const server = setupServer(mockHandler)
115 |
116 | await server.boundary(async () => {
117 | const request = createMockToolRequest('query_metrics', {
118 | from: 1640995000,
119 | to: 1641095000,
120 | query: 'avg:non.existent.metric{*}',
121 | })
122 | const response = (await toolHandlers.query_metrics(
123 | request,
124 | )) as unknown as DatadogToolResponse
125 |
126 | expect(response.content[0].text).toContain('Queried metrics data:')
127 | expect(response.content[0].text).toContain('series":[]')
128 | })()
129 |
130 | server.close()
131 | })
132 |
133 | it('should handle failed query status', async () => {
134 | const mockHandler = http.get(metricsEndpoint, async () => {
135 | return HttpResponse.json({
136 | status: 'error',
137 | message: 'Invalid query format',
138 | query: 'invalid:query:format',
139 | })
140 | })
141 |
142 | const server = setupServer(mockHandler)
143 |
144 | await server.boundary(async () => {
145 | const request = createMockToolRequest('query_metrics', {
146 | from: 1640995000,
147 | to: 1641095000,
148 | query: 'invalid:query:format',
149 | })
150 | const response = (await toolHandlers.query_metrics(
151 | request,
152 | )) as unknown as DatadogToolResponse
153 |
154 | expect(response.content[0].text).toContain('status":"error"')
155 | expect(response.content[0].text).toContain('Invalid query format')
156 | })()
157 |
158 | server.close()
159 | })
160 |
161 | it('should handle authentication errors', async () => {
162 | const mockHandler = http.get(metricsEndpoint, async () => {
163 | return HttpResponse.json(
164 | { errors: ['Authentication failed'] },
165 | { status: 403 },
166 | )
167 | })
168 |
169 | const server = setupServer(mockHandler)
170 |
171 | await server.boundary(async () => {
172 | const request = createMockToolRequest('query_metrics', {
173 | from: 1640995000,
174 | to: 1641095000,
175 | query: 'avg:system.cpu.user{*}',
176 | })
177 | await expect(toolHandlers.query_metrics(request)).rejects.toThrow()
178 | })()
179 |
180 | server.close()
181 | })
182 |
183 | it('should handle rate limit errors', async () => {
184 | const mockHandler = http.get(metricsEndpoint, async () => {
185 | return HttpResponse.json(
186 | { errors: ['Rate limit exceeded'] },
187 | { status: 429 },
188 | )
189 | })
190 |
191 | const server = setupServer(mockHandler)
192 |
193 | await server.boundary(async () => {
194 | const request = createMockToolRequest('query_metrics', {
195 | from: 1640995000,
196 | to: 1641095000,
197 | query: 'avg:system.cpu.user{*}',
198 | })
199 | await expect(toolHandlers.query_metrics(request)).rejects.toThrow(
200 | 'Rate limit exceeded',
201 | )
202 | })()
203 |
204 | server.close()
205 | })
206 |
207 | it('should handle invalid time range errors', async () => {
208 | const mockHandler = http.get(metricsEndpoint, async () => {
209 | return HttpResponse.json(
210 | { errors: ['Time range exceeds allowed limit'] },
211 | { status: 400 },
212 | )
213 | })
214 |
215 | const server = setupServer(mockHandler)
216 |
217 | await server.boundary(async () => {
218 | // Using a very large time range that might exceed limits
219 | const request = createMockToolRequest('query_metrics', {
220 |           from: 1600000000, // ~Sep 2020
221 |           to: 1700000000, // ~Nov 2023 (a multi-year span)
222 | query: 'avg:system.cpu.user{*}',
223 | })
224 | await expect(toolHandlers.query_metrics(request)).rejects.toThrow(
225 | 'Time range exceeds allowed limit',
226 | )
227 | })()
228 |
229 | server.close()
230 | })
231 | })
232 | })
233 |
```
--------------------------------------------------------------------------------
/src/tools/rum/tool.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | GetRumEventsZodSchema,
6 | GetRumApplicationsZodSchema,
7 | GetRumGroupedEventCountZodSchema,
8 | GetRumPagePerformanceZodSchema,
9 | GetRumPageWaterfallZodSchema,
10 | } from './schema'
11 |
12 | type RumToolName =
13 | | 'get_rum_events'
14 | | 'get_rum_applications'
15 | | 'get_rum_grouped_event_count'
16 | | 'get_rum_page_performance'
17 | | 'get_rum_page_waterfall'
18 | type RumTool = ExtendedTool<RumToolName>
19 |
20 | export const RUM_TOOLS: RumTool[] = [
21 | createToolSchema(
22 | GetRumApplicationsZodSchema,
23 | 'get_rum_applications',
24 | 'Get all RUM applications in the organization',
25 | ),
26 | createToolSchema(
27 | GetRumEventsZodSchema,
28 | 'get_rum_events',
29 | 'Search and retrieve RUM events from Datadog',
30 | ),
31 | createToolSchema(
32 | GetRumGroupedEventCountZodSchema,
33 | 'get_rum_grouped_event_count',
34 | 'Search, group and count RUM events by a specified dimension',
35 | ),
36 | createToolSchema(
37 | GetRumPagePerformanceZodSchema,
38 | 'get_rum_page_performance',
39 | 'Get page (view) performance metrics from RUM data',
40 | ),
41 | createToolSchema(
42 | GetRumPageWaterfallZodSchema,
43 | 'get_rum_page_waterfall',
44 | 'Retrieve RUM page (view) waterfall data filtered by application name and session ID',
45 | ),
46 | ] as const
47 |
48 | type RumToolHandlers = ToolHandlers<RumToolName>
49 |
50 | export const createRumToolHandlers = (
51 | apiInstance: v2.RUMApi,
52 | ): RumToolHandlers => ({
53 | get_rum_applications: async (request) => {
54 | GetRumApplicationsZodSchema.parse(request.params.arguments)
55 |
56 | const response = await apiInstance.getRUMApplications()
57 |
58 | if (response.data == null) {
59 | throw new Error('No RUM applications data returned')
60 | }
61 |
62 | return {
63 | content: [
64 | {
65 | type: 'text',
66 | text: `RUM applications: ${JSON.stringify(response.data)}`,
67 | },
68 | ],
69 | }
70 | },
71 |
72 | get_rum_events: async (request) => {
73 | const { query, from, to, limit } = GetRumEventsZodSchema.parse(
74 | request.params.arguments,
75 | )
76 |
77 | const response = await apiInstance.listRUMEvents({
78 | filterQuery: query,
79 | filterFrom: new Date(from * 1000),
80 | filterTo: new Date(to * 1000),
81 | sort: 'timestamp',
82 | pageLimit: limit,
83 | })
84 |
85 | if (response.data == null) {
86 | throw new Error('No RUM events data returned')
87 | }
88 |
89 | return {
90 | content: [
91 | {
92 | type: 'text',
93 | text: `RUM events data: ${JSON.stringify(response.data)}`,
94 | },
95 | ],
96 | }
97 | },
98 |
99 | get_rum_grouped_event_count: async (request) => {
100 | const { query, from, to, groupBy } = GetRumGroupedEventCountZodSchema.parse(
101 | request.params.arguments,
102 | )
103 |
104 | // For session counts, we need to use a query to count unique sessions
105 | const response = await apiInstance.listRUMEvents({
106 | filterQuery: query !== '*' ? query : undefined,
107 | filterFrom: new Date(from * 1000),
108 | filterTo: new Date(to * 1000),
109 | sort: 'timestamp',
110 | pageLimit: 2000,
111 | })
112 |
113 | if (response.data == null) {
114 | throw new Error('No RUM events data returned')
115 | }
116 |
117 | // Extract session counts grouped by the specified dimension
118 | const sessions = new Map<string, Set<string>>()
119 |
120 | for (const event of response.data) {
121 | if (!event.attributes?.attributes) {
122 | continue
123 | }
124 |
125 | // Parse the groupBy path (e.g., 'application.id')
126 | const groupPath = groupBy.split('.') as Array<
127 | keyof typeof event.attributes.attributes
128 | >
129 |
130 | const result = getValueByPath(
131 | event.attributes.attributes,
132 | groupPath.map((path) => String(path)),
133 | )
134 | const groupValue = result.found ? String(result.value) : 'unknown'
135 |
136 | // Get or create the session set for this group
137 | if (!sessions.has(groupValue)) {
138 | sessions.set(groupValue, new Set<string>())
139 | }
140 |
141 | // Add the session ID to the set if it exists
142 | if (event.attributes.attributes.session?.id) {
143 | sessions.get(groupValue)?.add(event.attributes.attributes.session.id)
144 | }
145 | }
146 |
147 | // Convert the map to an object with counts
148 | const sessionCounts = Object.fromEntries(
149 | Array.from(sessions.entries()).map(([key, set]) => [key, set.size]),
150 | )
151 |
152 | return {
153 | content: [
154 | {
155 | type: 'text',
156 | text: `Session counts (grouped by ${groupBy}): ${JSON.stringify(sessionCounts)}`,
157 | },
158 | ],
159 | }
160 | },
161 |
162 | get_rum_page_performance: async (request) => {
163 | const { query, from, to, metricNames } =
164 | GetRumPagePerformanceZodSchema.parse(request.params.arguments)
165 |
166 | // Build a query that focuses on view events with performance metrics
167 | const viewQuery = query !== '*' ? `@type:view ${query}` : '@type:view'
168 |
169 | const response = await apiInstance.listRUMEvents({
170 | filterQuery: viewQuery,
171 | filterFrom: new Date(from * 1000),
172 | filterTo: new Date(to * 1000),
173 | sort: 'timestamp',
174 | pageLimit: 2000,
175 | })
176 |
177 | if (response.data == null) {
178 | throw new Error('No RUM events data returned')
179 | }
180 |
181 | // Extract and calculate performance metrics
182 | const metrics: Record<string, number[]> = metricNames.reduce(
183 | (acc, name) => {
184 | acc[name] = []
185 | return acc
186 | },
187 | {} as Record<string, number[]>,
188 | )
189 |
190 | for (const event of response.data) {
191 | if (!event.attributes?.attributes) {
192 | continue
193 | }
194 |
195 | // Collect each requested metric if it exists
196 | for (const metricName of metricNames) {
197 | // Handle nested properties like 'view.load_time'
198 | const metricNameParts = metricName.split('.') as Array<
199 | keyof typeof event.attributes.attributes
200 | >
201 |
202 | if (event.attributes.attributes == null) {
203 | continue
204 | }
205 |
206 | const value = metricNameParts.reduce(
207 | (acc, part) => (acc ? acc[part] : undefined),
208 | event.attributes.attributes,
209 | )
210 |
211 | // If we found a numeric value, add it to the metrics
212 | if (typeof value === 'number') {
213 | metrics[metricName].push(value)
214 | }
215 | }
216 | }
217 |
218 | // Calculate statistics for each metric
219 | const results: Record<
220 | string,
221 | { avg: number; min: number; max: number; count: number }
222 | > = Object.entries(metrics).reduce(
223 | (acc, [name, values]) => {
224 | if (values.length > 0) {
225 | const sum = values.reduce((a, b) => a + b, 0)
226 | acc[name] = {
227 | avg: sum / values.length,
228 | min: Math.min(...values),
229 | max: Math.max(...values),
230 | count: values.length,
231 | }
232 | } else {
233 | acc[name] = { avg: 0, min: 0, max: 0, count: 0 }
234 | }
235 | return acc
236 | },
237 | {} as Record<
238 | string,
239 | { avg: number; min: number; max: number; count: number }
240 | >,
241 | )
242 |
243 | return {
244 | content: [
245 | {
246 | type: 'text',
247 | text: `Page performance metrics: ${JSON.stringify(results)}`,
248 | },
249 | ],
250 | }
251 | },
252 |
253 | get_rum_page_waterfall: async (request) => {
254 | const { applicationName, sessionId } = GetRumPageWaterfallZodSchema.parse(
255 | request.params.arguments,
256 | )
257 |
258 | const response = await apiInstance.listRUMEvents({
259 | filterQuery: `@application.name:${applicationName} @session.id:${sessionId}`,
260 | sort: 'timestamp',
261 | pageLimit: 2000,
262 | })
263 |
264 | if (response.data == null) {
265 | throw new Error('No RUM events data returned')
266 | }
267 |
268 | return {
269 | content: [
270 | {
271 | type: 'text',
272 | text: `Waterfall data: ${JSON.stringify(response.data)}`,
273 | },
274 | ],
275 | }
276 | },
277 | })
278 |
279 | // Get the group value using a recursive function approach
280 | const getValueByPath = (
281 | obj: Record<string, unknown>,
282 | path: string[],
283 | index = 0,
284 | ): { value: unknown; found: boolean } => {
285 | if (index >= path.length) {
286 | return { value: obj, found: true }
287 | }
288 |
289 | const key = path[index]
290 | const typedObj = obj as Record<string, unknown>
291 |
292 | if (typedObj[key] === undefined) {
293 | return { value: null, found: false }
294 | }
295 |
296 | return getValueByPath(
297 | typedObj[key] as Record<string, unknown>,
298 | path,
299 | index + 1,
300 | )
301 | }
302 |
```
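
The core of `get_rum_grouped_event_count` is the session bucketing; the illustrative snippet below (not repository code) restates that logic over an in-memory sample so the resulting `sessionCounts` shape is easy to see.

```typescript
// Illustrative only: the grouping that get_rum_grouped_event_count performs,
// restated over a small in-memory sample instead of live RUM events.
const sample = [
  { group: 'shop-frontend', sessionId: 's1' },
  { group: 'shop-frontend', sessionId: 's1' }, // same session counted once
  { group: 'shop-frontend', sessionId: 's2' },
  { group: 'admin-console', sessionId: 's3' },
]

const sessions = new Map<string, Set<string>>()
for (const event of sample) {
  if (!sessions.has(event.group)) {
    sessions.set(event.group, new Set<string>())
  }
  sessions.get(event.group)?.add(event.sessionId)
}

const sessionCounts = Object.fromEntries(
  Array.from(sessions.entries()).map(([key, set]) => [key, set.size]),
)
console.log(sessionCounts) // { 'shop-frontend': 2, 'admin-console': 1 }
```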
--------------------------------------------------------------------------------
/tests/tools/monitors.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createMonitorsToolHandlers } from '../../src/tools/monitors/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const monitorsEndpoint = `${baseUrl}/v1/monitor`
11 |
12 | describe('Monitors Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v1.MonitorsApi(datadogConfig)
24 | const toolHandlers = createMonitorsToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/monitors/#get-all-monitor-details
27 | describe.concurrent('get_monitors', async () => {
28 | it('should list monitors', async () => {
29 | const mockHandler = http.get(monitorsEndpoint, async () => {
30 | return HttpResponse.json([
31 | {
32 | id: 12345,
33 | name: 'Test API Monitor',
34 | type: 'metric alert',
35 | message: 'CPU usage is too high',
36 | tags: ['env:test', 'service:api'],
37 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
38 | overall_state: 'Alert',
39 | created: '2023-01-01T00:00:00.000Z',
40 | modified: '2023-01-02T00:00:00.000Z',
41 | },
42 | {
43 | id: 67890,
44 | name: 'Test Web Monitor',
45 | type: 'service check',
46 | message: 'Web service is down',
47 | tags: ['env:test', 'service:web'],
48 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
49 | overall_state: 'OK',
50 | created: '2023-02-01T00:00:00.000Z',
51 | modified: '2023-02-02T00:00:00.000Z',
52 | },
53 | ])
54 | })
55 |
56 | const server = setupServer(mockHandler)
57 |
58 | await server.boundary(async () => {
59 | const request = createMockToolRequest('get_monitors', {
60 | name: 'test-monitor',
61 | groupStates: ['alert', 'warn'],
62 | tags: ['env:test', 'service:api'],
63 | })
64 | const response = (await toolHandlers.get_monitors(
65 | request,
66 | )) as unknown as DatadogToolResponse
67 |
68 | // Check that monitors data is included
69 | expect(response.content[0].text).toContain('Monitors:')
70 | expect(response.content[0].text).toContain('Test API Monitor')
71 | expect(response.content[0].text).toContain('Test Web Monitor')
72 |
73 | // Check that summary is included
74 | expect(response.content[1].text).toContain('Summary of monitors:')
75 | expect(response.content[1].text).toContain('"alert":1')
76 | expect(response.content[1].text).toContain('"ok":1')
77 | })()
78 |
79 | server.close()
80 | })
81 |
82 | it('should handle monitors with various states', async () => {
83 | const mockHandler = http.get(monitorsEndpoint, async () => {
84 | return HttpResponse.json([
85 | {
86 | id: 1,
87 | name: 'Alert Monitor',
88 | overall_state: 'Alert',
89 | tags: ['env:test'],
90 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
91 | type: 'metric alert',
92 | },
93 | {
94 | id: 2,
95 | name: 'Warn Monitor',
96 | overall_state: 'Warn',
97 | tags: ['env:test'],
98 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
99 | type: 'metric alert',
100 | },
101 | {
102 | id: 3,
103 | name: 'No Data Monitor',
104 | overall_state: 'No Data',
105 | tags: ['env:test'],
106 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
107 | type: 'metric alert',
108 | },
109 | {
110 | id: 4,
111 | name: 'OK Monitor',
112 | overall_state: 'OK',
113 | tags: ['env:test'],
114 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
115 | type: 'metric alert',
116 | },
117 | {
118 | id: 5,
119 | name: 'Ignored Monitor',
120 | overall_state: 'Ignored',
121 | tags: ['env:test'],
122 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
123 | type: 'metric alert',
124 | },
125 | {
126 | id: 6,
127 | name: 'Skipped Monitor',
128 | overall_state: 'Skipped',
129 | tags: ['env:test'],
130 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
131 | type: 'metric alert',
132 | },
133 | {
134 | id: 7,
135 | name: 'Unknown Monitor',
136 | overall_state: 'Unknown',
137 | tags: ['env:test'],
138 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
139 | type: 'metric alert',
140 | },
141 | ])
142 | })
143 |
144 | const server = setupServer(mockHandler)
145 |
146 | await server.boundary(async () => {
147 | const request = createMockToolRequest('get_monitors', {
148 | tags: ['env:test'],
149 | })
150 | const response = (await toolHandlers.get_monitors(
151 | request,
152 | )) as unknown as DatadogToolResponse
153 |
154 | // Check summary data has counts for all states
155 | expect(response.content[1].text).toContain('"alert":1')
156 | expect(response.content[1].text).toContain('"warn":1')
157 | expect(response.content[1].text).toContain('"noData":1')
158 | expect(response.content[1].text).toContain('"ok":1')
159 | expect(response.content[1].text).toContain('"ignored":1')
160 | expect(response.content[1].text).toContain('"skipped":1')
161 | expect(response.content[1].text).toContain('"unknown":1')
162 | })()
163 |
164 | server.close()
165 | })
166 |
167 | it('should handle empty response', async () => {
168 | const mockHandler = http.get(monitorsEndpoint, async () => {
169 | return HttpResponse.json([])
170 | })
171 |
172 | const server = setupServer(mockHandler)
173 |
174 | await server.boundary(async () => {
175 | const request = createMockToolRequest('get_monitors', {
176 | name: 'non-existent-monitor',
177 | })
178 | const response = (await toolHandlers.get_monitors(
179 | request,
180 | )) as unknown as DatadogToolResponse
181 |
182 | // Check that response contains empty array
183 | expect(response.content[0].text).toContain('Monitors: []')
184 |
185 | // Check that summary shows all zeros
186 | expect(response.content[1].text).toContain('"alert":0')
187 | expect(response.content[1].text).toContain('"warn":0')
188 | expect(response.content[1].text).toContain('"noData":0')
189 | expect(response.content[1].text).toContain('"ok":0')
190 | })()
191 |
192 | server.close()
193 | })
194 |
195 | it('should handle null response', async () => {
196 | const mockHandler = http.get(monitorsEndpoint, async () => {
197 | return HttpResponse.json(null)
198 | })
199 |
200 | const server = setupServer(mockHandler)
201 |
202 | await server.boundary(async () => {
203 | const request = createMockToolRequest('get_monitors', {})
204 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
205 | 'No monitors data returned',
206 | )
207 | })()
208 |
209 | server.close()
210 | })
211 |
212 | it('should handle authentication errors', async () => {
213 | const mockHandler = http.get(monitorsEndpoint, async () => {
214 | return HttpResponse.json(
215 | { errors: ['Authentication failed'] },
216 | { status: 403 },
217 | )
218 | })
219 |
220 | const server = setupServer(mockHandler)
221 |
222 | await server.boundary(async () => {
223 | const request = createMockToolRequest('get_monitors', {})
224 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow()
225 | })()
226 |
227 | server.close()
228 | })
229 |
230 | it('should handle rate limit errors', async () => {
231 | const mockHandler = http.get(monitorsEndpoint, async () => {
232 | return HttpResponse.json(
233 | { errors: ['Rate limit exceeded'] },
234 | { status: 429 },
235 | )
236 | })
237 |
238 | const server = setupServer(mockHandler)
239 |
240 | await server.boundary(async () => {
241 | const request = createMockToolRequest('get_monitors', {})
242 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
243 | 'Rate limit exceeded',
244 | )
245 | })()
246 |
247 | server.close()
248 | })
249 |
250 | it('should handle server errors', async () => {
251 | const mockHandler = http.get(monitorsEndpoint, async () => {
252 | return HttpResponse.json(
253 | { errors: ['Internal server error'] },
254 | { status: 500 },
255 | )
256 | })
257 |
258 | const server = setupServer(mockHandler)
259 |
260 | await server.boundary(async () => {
261 | const request = createMockToolRequest('get_monitors', {})
262 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
263 | 'Internal server error',
264 | )
265 | })()
266 |
267 | server.close()
268 | })
269 | })
270 | })
271 |
```
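Every suite in this test directory follows the same shape: build an msw request handler, wrap it in a server, run the assertions inside `server.boundary(...)` so that `describe.concurrent` tests stay isolated, then close the server. A minimal standalone sketch of that pattern, using `msw/node`'s `setupServer` directly (the repository's `tests/helpers/msw` wrapper is assumed to behave similarly, with `listen()` already applied):

```typescript
import { http, HttpResponse } from 'msw'
import { setupServer } from 'msw/node'

async function main() {
  // Intercept the Datadog monitors endpoint and return a canned payload.
  const handler = http.get('https://api.datadoghq.com/api/v1/monitor', () =>
    HttpResponse.json([{ id: 1, name: 'Example Monitor', overall_state: 'OK' }]),
  )

  const server = setupServer(handler)
  server.listen({ onUnhandledRequest: 'error' })

  // boundary() scopes handler state to the wrapped callback, which is what
  // lets concurrent suites run side by side without leaking mocks.
  await server.boundary(async () => {
    const res = await fetch('https://api.datadoghq.com/api/v1/monitor')
    console.log(await res.json())
  })()

  server.close()
}

main()
```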
--------------------------------------------------------------------------------
/tests/tools/traces.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createTracesToolHandlers } from '../../src/tools/traces/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const tracesEndpoint = `${baseUrl}/v2/spans/events/search`
11 |
12 | describe('Traces Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v2.SpansApi(datadogConfig)
24 | const toolHandlers = createTracesToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/spans/#search-spans
27 | describe.concurrent('list_traces', async () => {
28 | it('should list traces with basic query', async () => {
29 | const mockHandler = http.post(tracesEndpoint, async () => {
30 | return HttpResponse.json({
31 | data: [
32 | {
33 | id: 'span-id-1',
34 | type: 'spans',
35 | attributes: {
36 | service: 'web-api',
37 | name: 'http.request',
38 | resource: 'GET /api/users',
39 | trace_id: 'trace-id-1',
40 | span_id: 'span-id-1',
41 | parent_id: 'parent-id-1',
42 | start: 1640995100000000000,
43 | duration: 500000000,
44 | error: 1,
45 | meta: {
46 | 'http.method': 'GET',
47 | 'http.status_code': '500',
48 | 'error.type': 'Internal Server Error',
49 | },
50 | },
51 | },
52 | {
53 | id: 'span-id-2',
54 | type: 'spans',
55 | attributes: {
56 | service: 'web-api',
57 | name: 'http.request',
58 | resource: 'GET /api/products',
59 | trace_id: 'trace-id-2',
60 | span_id: 'span-id-2',
61 | parent_id: 'parent-id-2',
62 | start: 1640995000000000000,
63 | duration: 300000000,
64 | error: 1,
65 | meta: {
66 | 'http.method': 'GET',
67 | 'http.status_code': '500',
68 | 'error.type': 'Internal Server Error',
69 | },
70 | },
71 | },
72 | ],
73 | meta: {
74 | page: {
75 | after: 'cursor-value',
76 | },
77 | },
78 | })
79 | })
80 |
81 | const server = setupServer(mockHandler)
82 |
83 | await server.boundary(async () => {
84 | const request = createMockToolRequest('list_traces', {
85 | query: 'http.status_code:500',
86 | from: 1640995000,
87 | to: 1640996000,
88 | limit: 50,
89 | })
90 | const response = (await toolHandlers.list_traces(
91 | request,
92 | )) as unknown as DatadogToolResponse
93 |
94 | expect(response.content[0].text).toContain('Traces:')
95 | expect(response.content[0].text).toContain('web-api')
96 | expect(response.content[0].text).toContain('GET /api/users')
97 | expect(response.content[0].text).toContain('GET /api/products')
98 | expect(response.content[0].text).toContain('count":2')
99 | })()
100 |
101 | server.close()
102 | })
103 |
104 | it('should include service and operation filters', async () => {
105 | const mockHandler = http.post(tracesEndpoint, async () => {
106 | return HttpResponse.json({
107 | data: [
108 | {
109 | id: 'span-id-3',
110 | type: 'spans',
111 | attributes: {
112 | service: 'payment-service',
113 | name: 'process-payment',
114 | resource: 'process-payment',
115 | trace_id: 'trace-id-3',
116 | span_id: 'span-id-3',
117 | parent_id: 'parent-id-3',
118 | start: 1640995100000000000,
119 | duration: 800000000,
120 | error: 1,
121 | meta: {
122 | 'error.type': 'PaymentProcessingError',
123 | },
124 | },
125 | },
126 | ],
127 | meta: {
128 | page: {
129 | after: null,
130 | },
131 | },
132 | })
133 | })
134 |
135 | const server = setupServer(mockHandler)
136 |
137 | await server.boundary(async () => {
138 | const request = createMockToolRequest('list_traces', {
139 | query: 'error:true',
140 | from: 1640995000,
141 | to: 1640996000,
142 | service: 'payment-service',
143 | operation: 'process-payment',
144 | })
145 | const response = (await toolHandlers.list_traces(
146 | request,
147 | )) as unknown as DatadogToolResponse
148 |
149 | expect(response.content[0].text).toContain('payment-service')
150 | expect(response.content[0].text).toContain('process-payment')
151 | expect(response.content[0].text).toContain('PaymentProcessingError')
152 | })()
153 |
154 | server.close()
155 | })
156 |
157 | it('should handle ascending sort', async () => {
158 | const mockHandler = http.post(tracesEndpoint, async () => {
159 | return HttpResponse.json({
160 | data: [
161 | {
162 | id: 'span-id-oldest',
163 | type: 'spans',
164 | attributes: {
165 | service: 'api',
166 | name: 'http.request',
167 | start: 1640995000000000000,
168 | },
169 | },
170 | {
171 | id: 'span-id-newest',
172 | type: 'spans',
173 | attributes: {
174 | service: 'api',
175 | name: 'http.request',
176 | start: 1640995100000000000,
177 | },
178 | },
179 | ],
180 | })
181 | })
182 |
183 | const server = setupServer(mockHandler)
184 |
185 | await server.boundary(async () => {
186 | const request = createMockToolRequest('list_traces', {
187 | query: '',
188 | from: 1640995000,
189 | to: 1640996000,
190 | sort: 'timestamp', // ascending order
191 | })
192 | const response = (await toolHandlers.list_traces(
193 | request,
194 | )) as unknown as DatadogToolResponse
195 |
196 | expect(response.content[0].text).toContain('span-id-oldest')
197 | expect(response.content[0].text).toContain('span-id-newest')
198 | })()
199 |
200 | server.close()
201 | })
202 |
203 | it('should handle empty response', async () => {
204 | const mockHandler = http.post(tracesEndpoint, async () => {
205 | return HttpResponse.json({
206 | data: [],
207 | meta: {
208 | page: {},
209 | },
210 | })
211 | })
212 |
213 | const server = setupServer(mockHandler)
214 |
215 | await server.boundary(async () => {
216 | const request = createMockToolRequest('list_traces', {
217 | query: 'service:non-existent',
218 | from: 1640995000,
219 | to: 1640996000,
220 | })
221 | const response = (await toolHandlers.list_traces(
222 | request,
223 | )) as unknown as DatadogToolResponse
224 |
225 | expect(response.content[0].text).toContain('Traces:')
226 | expect(response.content[0].text).toContain('count":0')
227 | expect(response.content[0].text).toContain('traces":[]')
228 | })()
229 |
230 | server.close()
231 | })
232 |
233 | it('should handle null response data', async () => {
234 | const mockHandler = http.post(tracesEndpoint, async () => {
235 | return HttpResponse.json({
236 | data: null,
237 | meta: {
238 | page: {},
239 | },
240 | })
241 | })
242 |
243 | const server = setupServer(mockHandler)
244 |
245 | await server.boundary(async () => {
246 | const request = createMockToolRequest('list_traces', {
247 | query: '',
248 | from: 1640995000,
249 | to: 1640996000,
250 | })
251 | await expect(toolHandlers.list_traces(request)).rejects.toThrow(
252 | 'No traces data returned',
253 | )
254 | })()
255 |
256 | server.close()
257 | })
258 |
259 | it('should handle authentication errors', async () => {
260 | const mockHandler = http.post(tracesEndpoint, async () => {
261 | return HttpResponse.json(
262 | { errors: ['Authentication failed'] },
263 | { status: 403 },
264 | )
265 | })
266 |
267 | const server = setupServer(mockHandler)
268 |
269 | await server.boundary(async () => {
270 | const request = createMockToolRequest('list_traces', {
271 | query: '',
272 | from: 1640995000,
273 | to: 1640996000,
274 | })
275 | await expect(toolHandlers.list_traces(request)).rejects.toThrow()
276 | })()
277 |
278 | server.close()
279 | })
280 |
281 | it('should handle rate limit errors', async () => {
282 | const mockHandler = http.post(tracesEndpoint, async () => {
283 | return HttpResponse.json(
284 | { errors: ['Rate limit exceeded'] },
285 | { status: 429 },
286 | )
287 | })
288 |
289 | const server = setupServer(mockHandler)
290 |
291 | await server.boundary(async () => {
292 | const request = createMockToolRequest('list_traces', {
293 | query: '',
294 | from: 1640995000,
295 | to: 1640996000,
296 | })
297 | await expect(toolHandlers.list_traces(request)).rejects.toThrow(
298 | /errors./,
299 | )
300 | })()
301 |
302 | server.close()
303 | })
304 | })
305 | })
306 |
```
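The 403/429/500 cases above repeat the same handler-and-boundary boilerplate with only the status code and error message changing. If that repetition ever becomes a maintenance burden, a parameterized variant with vitest's `it.each` is one possible shape; `describeErrorCases` and the `listTraces` callback below are illustrative names, not part of the repository:

```typescript
import { describe, it, expect } from 'vitest'
import { http, HttpResponse } from 'msw'
import { setupServer } from 'msw/node'

type ListTraces = () => Promise<unknown>

const errorCases: Array<[number, string]> = [
  [403, 'Authentication failed'],
  [429, 'Rate limit exceeded'],
  [500, 'Internal server error'],
]

// Hypothetical helper: registers one test per (status, message) pair instead of
// three near-identical it() blocks.
export function describeErrorCases(endpoint: string, listTraces: ListTraces) {
  describe('error responses', () => {
    it.each(errorCases)(
      'rejects when the API returns HTTP %i',
      async (status, message) => {
        const server = setupServer(
          http.post(endpoint, () =>
            HttpResponse.json({ errors: [message] }, { status }),
          ),
        )
        server.listen()

        await server.boundary(async () => {
          await expect(listTraces()).rejects.toThrow()
        })()

        server.close()
      },
    )
  })
}
```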
--------------------------------------------------------------------------------
/tests/tools/downtimes.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createDowntimesToolHandlers } from '../../src/tools/downtimes/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const downtimesEndpoint = `${baseUrl}/v1/downtime`
11 |
12 | describe('Downtimes Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v1.DowntimesApi(datadogConfig)
24 | const toolHandlers = createDowntimesToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/downtimes/#get-all-downtimes
27 | describe.concurrent('list_downtimes', async () => {
28 | it('should list downtimes', async () => {
29 | const mockHandler = http.get(downtimesEndpoint, async () => {
30 | return HttpResponse.json([
31 | {
32 | id: 123456789,
33 | active: true,
34 | disabled: false,
35 | start: 1640995100,
36 | end: 1640995200,
37 | scope: ['host:test-host'],
38 | message: 'Test downtime',
39 | monitor_id: 87654321,
40 | created: 1640995000,
41 | creator_id: 12345,
42 | updated_at: 1640995010,
43 | monitor_tags: ['env:test'],
44 | },
45 | {
46 | id: 987654321,
47 | active: false,
48 | disabled: false,
49 | start: 1641095100,
50 | end: 1641095200,
51 | scope: ['service:web'],
52 | message: 'Another test downtime',
53 | monitor_id: null,
54 | created: 1641095000,
55 | creator_id: 12345,
56 | updated_at: 1641095010,
57 | monitor_tags: ['service:web'],
58 | },
59 | ])
60 | })
61 |
62 | const server = setupServer(mockHandler)
63 |
64 | await server.boundary(async () => {
65 | const request = createMockToolRequest('list_downtimes', {
66 | currentOnly: true,
67 | })
68 | const response = (await toolHandlers.list_downtimes(
69 | request,
70 | )) as unknown as DatadogToolResponse
71 |
72 | expect(response.content[0].text).toContain('Listed downtimes:')
73 | expect(response.content[0].text).toContain('Test downtime')
74 | expect(response.content[0].text).toContain('Another test downtime')
75 | })()
76 |
77 | server.close()
78 | })
79 |
80 | it('should handle empty response', async () => {
81 | const mockHandler = http.get(downtimesEndpoint, async () => {
82 | return HttpResponse.json([])
83 | })
84 |
85 | const server = setupServer(mockHandler)
86 |
87 | await server.boundary(async () => {
88 | const request = createMockToolRequest('list_downtimes', {
89 | currentOnly: false,
90 | })
91 | const response = (await toolHandlers.list_downtimes(
92 | request,
93 | )) as unknown as DatadogToolResponse
94 |
95 | expect(response.content[0].text).toContain('Listed downtimes:')
96 | expect(response.content[0].text).toContain('[]')
97 | })()
98 |
99 | server.close()
100 | })
101 |
102 | it('should handle authentication errors', async () => {
103 | const mockHandler = http.get(downtimesEndpoint, async () => {
104 | return HttpResponse.json(
105 | { errors: ['Authentication failed'] },
106 | { status: 403 },
107 | )
108 | })
109 |
110 | const server = setupServer(mockHandler)
111 |
112 | await server.boundary(async () => {
113 | const request = createMockToolRequest('list_downtimes', {})
114 | await expect(toolHandlers.list_downtimes(request)).rejects.toThrow()
115 | })()
116 |
117 | server.close()
118 | })
119 |
120 | it('should handle rate limit errors', async () => {
121 | const mockHandler = http.get(downtimesEndpoint, async () => {
122 | return HttpResponse.json(
123 | { errors: ['Rate limit exceeded'] },
124 | { status: 429 },
125 | )
126 | })
127 |
128 | const server = setupServer(mockHandler)
129 |
130 | await server.boundary(async () => {
131 | const request = createMockToolRequest('list_downtimes', {})
132 | await expect(toolHandlers.list_downtimes(request)).rejects.toThrow(
133 | 'Rate limit exceeded',
134 | )
135 | })()
136 |
137 | server.close()
138 | })
139 | })
140 |
141 | // https://docs.datadoghq.com/api/latest/downtimes/#schedule-a-downtime
142 | describe.concurrent('schedule_downtime', async () => {
143 | it('should schedule a downtime', async () => {
144 | const mockHandler = http.post(downtimesEndpoint, async () => {
145 | return HttpResponse.json({
146 | id: 123456789,
147 | active: true,
148 | disabled: false,
149 | start: 1640995100,
150 | end: 1640995200,
151 | scope: ['host:test-host'],
152 | message: 'Scheduled maintenance',
153 | monitor_id: null,
154 | timezone: 'UTC',
155 | created: 1640995000,
156 | creator_id: 12345,
157 | updated_at: 1640995000,
158 | })
159 | })
160 |
161 | const server = setupServer(mockHandler)
162 |
163 | await server.boundary(async () => {
164 | const request = createMockToolRequest('schedule_downtime', {
165 | scope: 'host:test-host',
166 | start: 1640995100,
167 | end: 1640995200,
168 | message: 'Scheduled maintenance',
169 | timezone: 'UTC',
170 | })
171 | const response = (await toolHandlers.schedule_downtime(
172 | request,
173 | )) as unknown as DatadogToolResponse
174 |
175 | expect(response.content[0].text).toContain('Scheduled downtime:')
176 | expect(response.content[0].text).toContain('123456789')
177 | expect(response.content[0].text).toContain('Scheduled maintenance')
178 | })()
179 |
180 | server.close()
181 | })
182 |
183 | it('should schedule a recurring downtime', async () => {
184 | const mockHandler = http.post(downtimesEndpoint, async () => {
185 | return HttpResponse.json({
186 | id: 123456789,
187 | active: true,
188 | disabled: false,
189 | message: 'Weekly maintenance',
190 | scope: ['service:api'],
191 | recurrence: {
192 | type: 'weeks',
193 | period: 1,
194 | week_days: ['Mon'],
195 | },
196 | created: 1640995000,
197 | creator_id: 12345,
198 | updated_at: 1640995000,
199 | })
200 | })
201 |
202 | const server = setupServer(mockHandler)
203 |
204 | await server.boundary(async () => {
205 | const request = createMockToolRequest('schedule_downtime', {
206 | scope: 'service:api',
207 | message: 'Weekly maintenance',
208 | recurrence: {
209 | type: 'weeks',
210 | period: 1,
211 | weekDays: ['Mon'],
212 | },
213 | })
214 | const response = (await toolHandlers.schedule_downtime(
215 | request,
216 | )) as unknown as DatadogToolResponse
217 |
218 | expect(response.content[0].text).toContain('Scheduled downtime:')
219 | expect(response.content[0].text).toContain('Weekly maintenance')
220 | expect(response.content[0].text).toContain('weeks')
221 | expect(response.content[0].text).toContain('Mon')
222 | })()
223 |
224 | server.close()
225 | })
226 |
227 | it('should handle validation errors', async () => {
228 | const mockHandler = http.post(downtimesEndpoint, async () => {
229 | return HttpResponse.json(
230 | { errors: ['Invalid scope format'] },
231 | { status: 400 },
232 | )
233 | })
234 |
235 | const server = setupServer(mockHandler)
236 |
237 | await server.boundary(async () => {
238 | const request = createMockToolRequest('schedule_downtime', {
239 | scope: 'invalid:format',
240 | start: 1640995100,
241 | end: 1640995200,
242 | })
243 | await expect(toolHandlers.schedule_downtime(request)).rejects.toThrow(
244 | 'Invalid scope format',
245 | )
246 | })()
247 |
248 | server.close()
249 | })
250 | })
251 |
252 | // https://docs.datadoghq.com/api/latest/downtimes/#cancel-a-downtime
253 | describe.concurrent('cancel_downtime', async () => {
254 | it('should cancel a downtime', async () => {
255 | const downtimeId = 123456789
256 | const mockHandler = http.delete(
257 | `${downtimesEndpoint}/${downtimeId}`,
258 | async () => {
259 | return new HttpResponse(null, { status: 204 })
260 | },
261 | )
262 |
263 | const server = setupServer(mockHandler)
264 |
265 | await server.boundary(async () => {
266 | const request = createMockToolRequest('cancel_downtime', {
267 | downtimeId,
268 | })
269 | const response = (await toolHandlers.cancel_downtime(
270 | request,
271 | )) as unknown as DatadogToolResponse
272 |
273 | expect(response.content[0].text).toContain(
274 | `Cancelled downtime with ID: ${downtimeId}`,
275 | )
276 | })()
277 |
278 | server.close()
279 | })
280 |
281 | it('should handle not found errors', async () => {
282 | const downtimeId = 999999999
283 | const mockHandler = http.delete(
284 | `${downtimesEndpoint}/${downtimeId}`,
285 | async () => {
286 | return HttpResponse.json(
287 | { errors: ['Downtime not found'] },
288 | { status: 404 },
289 | )
290 | },
291 | )
292 |
293 | const server = setupServer(mockHandler)
294 |
295 | await server.boundary(async () => {
296 | const request = createMockToolRequest('cancel_downtime', {
297 | downtimeId,
298 | })
299 | await expect(toolHandlers.cancel_downtime(request)).rejects.toThrow(
300 | 'Downtime not found',
301 | )
302 | })()
303 |
304 | server.close()
305 | })
306 |
307 | it('should handle server errors', async () => {
308 | const downtimeId = 123456789
309 | const mockHandler = http.delete(
310 | `${downtimesEndpoint}/${downtimeId}`,
311 | async () => {
312 | return HttpResponse.json(
313 | { errors: ['Internal server error'] },
314 | { status: 500 },
315 | )
316 | },
317 | )
318 |
319 | const server = setupServer(mockHandler)
320 |
321 | await server.boundary(async () => {
322 | const request = createMockToolRequest('cancel_downtime', {
323 | downtimeId,
324 | })
325 | await expect(toolHandlers.cancel_downtime(request)).rejects.toThrow(
326 | 'Internal server error',
327 | )
328 | })()
329 |
330 | server.close()
331 | })
332 | })
333 | })
334 |
```
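The downtime suites assert only on the formatted tool response; they never inspect what `schedule_downtime` actually sent to the API. Because msw resolvers receive the intercepted `request`, a capturing handler is enough if that ever needs checking. A sketch, with the endpoint hard-coded here as an assumption (the real tests derive it from `tests/helpers/datadog`), and with the asserted payload shape depending on the tool's serializer:

```typescript
import { http, HttpResponse } from 'msw'

// Assumed literal; the suites above build this from the shared baseUrl helper.
const downtimesEndpoint = 'https://api.datadoghq.com/api/v1/downtime'

// Returns a handler plus an accessor for the JSON body it received, so a test
// can assert on the outgoing payload as well as on the formatted response.
export function captureScheduleDowntime() {
  let received: unknown
  const handler = http.post(downtimesEndpoint, async ({ request }) => {
    received = await request.json()
    return HttpResponse.json({ id: 123456789, message: 'Scheduled maintenance' })
  })
  return { handler, body: () => received }
}

// Usage inside a test:
//   const { handler, body } = captureScheduleDowntime()
//   const server = setupServer(handler)
//   ...call schedule_downtime inside server.boundary()...
//   expect(body()).toMatchObject({ message: 'Scheduled maintenance' }) // shape depends on the tool
```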
--------------------------------------------------------------------------------
/tests/tools/hosts.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createHostsToolHandlers } from '../../src/tools/hosts/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const hostsBaseEndpoint = `${baseUrl}/v1/hosts`
11 | const hostBaseEndpoint = `${baseUrl}/v1/host`
12 | const hostTotalsEndpoint = `${hostsBaseEndpoint}/totals`
13 |
14 | describe('Hosts Tool', () => {
15 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
16 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
17 | }
18 |
19 | const datadogConfig = createDatadogConfig({
20 | apiKeyAuth: process.env.DATADOG_API_KEY,
21 | appKeyAuth: process.env.DATADOG_APP_KEY,
22 | site: process.env.DATADOG_SITE,
23 | })
24 |
25 | const apiInstance = new v1.HostsApi(datadogConfig)
26 | const toolHandlers = createHostsToolHandlers(apiInstance)
27 |
28 | // https://docs.datadoghq.com/api/latest/hosts/#get-all-hosts
29 | describe.concurrent('list_hosts', async () => {
30 | it('should list hosts with filters', async () => {
31 | const mockHandler = http.get(hostsBaseEndpoint, async () => {
32 | return HttpResponse.json({
33 | host_list: [
34 | {
35 | name: 'web-server-01',
36 | id: 12345,
37 | aliases: ['web-server-01.example.com'],
38 | apps: ['nginx', 'redis'],
39 | is_muted: false,
40 | last_reported_time: 1640995100,
41 | meta: {
42 | platform: 'linux',
43 | agent_version: '7.36.1',
44 | socket_hostname: 'web-server-01',
45 | },
46 | metrics: {
47 | load: 0.5,
48 | cpu: 45.6,
49 | memory: 78.2,
50 | },
51 | sources: ['agent'],
52 | up: true,
53 | },
54 | {
55 | name: 'db-server-01',
56 | id: 67890,
57 | aliases: ['db-server-01.example.com'],
58 | apps: ['postgres'],
59 | is_muted: true,
60 | last_reported_time: 1640995000,
61 | meta: {
62 | platform: 'linux',
63 | agent_version: '7.36.1',
64 | socket_hostname: 'db-server-01',
65 | },
66 | metrics: {
67 | load: 1.2,
68 | cpu: 78.3,
69 | memory: 92.1,
70 | },
71 | sources: ['agent'],
72 | up: true,
73 | },
74 | ],
75 | total_matching: 2,
76 | total_returned: 2,
77 | })
78 | })
79 |
80 | const server = setupServer(mockHandler)
81 |
82 | await server.boundary(async () => {
83 | const request = createMockToolRequest('list_hosts', {
84 | filter: 'env:production',
85 | sort_field: 'status',
86 | sort_dir: 'desc',
87 | include_hosts_metadata: true,
88 | })
89 | const response = (await toolHandlers.list_hosts(
90 | request,
91 | )) as unknown as DatadogToolResponse
92 |
93 | expect(response.content[0].text).toContain('Hosts:')
94 | expect(response.content[0].text).toContain('web-server-01')
95 | expect(response.content[0].text).toContain('db-server-01')
96 | expect(response.content[0].text).toContain('postgres')
97 | })()
98 |
99 | server.close()
100 | })
101 |
102 | it('should handle empty response', async () => {
103 | const mockHandler = http.get(hostsBaseEndpoint, async () => {
104 | return HttpResponse.json({
105 | host_list: [],
106 | total_matching: 0,
107 | total_returned: 0,
108 | })
109 | })
110 |
111 | const server = setupServer(mockHandler)
112 |
113 | await server.boundary(async () => {
114 | const request = createMockToolRequest('list_hosts', {
115 | filter: 'non-existent:value',
116 | })
117 | const response = (await toolHandlers.list_hosts(
118 | request,
119 | )) as unknown as DatadogToolResponse
120 |
121 | expect(response.content[0].text).toContain('Hosts: []')
122 | })()
123 |
124 | server.close()
125 | })
126 |
127 | it('should handle missing host_list', async () => {
128 | const mockHandler = http.get(hostsBaseEndpoint, async () => {
129 | return HttpResponse.json({
130 | total_matching: 0,
131 | total_returned: 0,
132 | })
133 | })
134 |
135 | const server = setupServer(mockHandler)
136 |
137 | await server.boundary(async () => {
138 | const request = createMockToolRequest('list_hosts', {})
139 | await expect(toolHandlers.list_hosts(request)).rejects.toThrow(
140 | 'No hosts data returned',
141 | )
142 | })()
143 |
144 | server.close()
145 | })
146 |
147 | it('should handle authentication errors', async () => {
148 | const mockHandler = http.get(hostsBaseEndpoint, async () => {
149 | return HttpResponse.json(
150 | { errors: ['Authentication failed'] },
151 | { status: 403 },
152 | )
153 | })
154 |
155 | const server = setupServer(mockHandler)
156 |
157 | await server.boundary(async () => {
158 | const request = createMockToolRequest('list_hosts', {})
159 | await expect(toolHandlers.list_hosts(request)).rejects.toThrow()
160 | })()
161 |
162 | server.close()
163 | })
164 | })
165 |
166 | // https://docs.datadoghq.com/api/latest/hosts/#get-the-total-number-of-active-hosts
167 | describe.concurrent('get_active_hosts_count', async () => {
168 | it('should get active hosts count', async () => {
169 | const mockHandler = http.get(hostTotalsEndpoint, async () => {
170 | return HttpResponse.json({
171 | total_up: 512,
172 | total_active: 520,
173 | })
174 | })
175 |
176 | const server = setupServer(mockHandler)
177 |
178 | await server.boundary(async () => {
179 | const request = createMockToolRequest('get_active_hosts_count', {
180 | from: 3600,
181 | })
182 | const response = (await toolHandlers.get_active_hosts_count(
183 | request,
184 | )) as unknown as DatadogToolResponse
185 |
186 | expect(response.content[0].text).toContain('total_active')
187 | expect(response.content[0].text).toContain('520')
188 | expect(response.content[0].text).toContain('total_up')
189 | expect(response.content[0].text).toContain('512')
190 | })()
191 |
192 | server.close()
193 | })
194 |
195 | it('should use default from value if not provided', async () => {
196 | const mockHandler = http.get(hostTotalsEndpoint, async () => {
197 | return HttpResponse.json({
198 | total_up: 510,
199 | total_active: 518,
200 | })
201 | })
202 |
203 | const server = setupServer(mockHandler)
204 |
205 | await server.boundary(async () => {
206 | const request = createMockToolRequest('get_active_hosts_count', {})
207 | const response = (await toolHandlers.get_active_hosts_count(
208 | request,
209 | )) as unknown as DatadogToolResponse
210 |
211 | expect(response.content[0].text).toContain('518')
212 | expect(response.content[0].text).toContain('510')
213 | })()
214 |
215 | server.close()
216 | })
217 |
218 | it('should handle server errors', async () => {
219 | const mockHandler = http.get(hostTotalsEndpoint, async () => {
220 | return HttpResponse.json(
221 | { errors: ['Internal server error'] },
222 | { status: 500 },
223 | )
224 | })
225 |
226 | const server = setupServer(mockHandler)
227 |
228 | await server.boundary(async () => {
229 | const request = createMockToolRequest('get_active_hosts_count', {})
230 | await expect(
231 | toolHandlers.get_active_hosts_count(request),
232 | ).rejects.toThrow()
233 | })()
234 |
235 | server.close()
236 | })
237 | })
238 |
239 | // https://docs.datadoghq.com/api/latest/hosts/#mute-a-host
240 | describe.concurrent('mute_host', async () => {
241 | it('should mute a host', async () => {
242 | const mockHandler = http.post(
243 | `${hostBaseEndpoint}/:hostname/mute`,
244 | async ({ params }) => {
245 | return HttpResponse.json({
246 | action: 'muted',
247 | hostname: params.hostname,
248 | message: 'Maintenance in progress',
249 | end: 1641095100,
250 | })
251 | },
252 | )
253 |
254 | const server = setupServer(mockHandler)
255 |
256 | await server.boundary(async () => {
257 | const request = createMockToolRequest('mute_host', {
258 | hostname: 'test-host',
259 | message: 'Maintenance in progress',
260 | end: 1641095100,
261 | override: true,
262 | })
263 | const response = (await toolHandlers.mute_host(
264 | request,
265 | )) as unknown as DatadogToolResponse
266 |
267 | expect(response.content[0].text).toContain('success')
268 | expect(response.content[0].text).toContain('test-host')
269 | expect(response.content[0].text).toContain('Maintenance in progress')
270 | })()
271 |
272 | server.close()
273 | })
274 |
275 | it('should handle host not found', async () => {
276 | const mockHandler = http.post(
277 | `${hostBaseEndpoint}/:hostname/mute`,
278 | async () => {
279 | return HttpResponse.json(
280 | { errors: ['Host not found'] },
281 | { status: 404 },
282 | )
283 | },
284 | )
285 |
286 | const server = setupServer(mockHandler)
287 |
288 | await server.boundary(async () => {
289 | const request = createMockToolRequest('mute_host', {
290 | hostname: 'non-existent-host',
291 | })
292 | await expect(toolHandlers.mute_host(request)).rejects.toThrow(
293 | 'Host not found',
294 | )
295 | })()
296 |
297 | server.close()
298 | })
299 | })
300 |
301 | // https://docs.datadoghq.com/api/latest/hosts/#unmute-a-host
302 | describe.concurrent('unmute_host', async () => {
303 | it('should unmute a host', async () => {
304 | const mockHandler = http.post(
305 | `${hostBaseEndpoint}/:hostname/unmute`,
306 | async ({ params }) => {
307 | return HttpResponse.json({
308 | action: 'unmuted',
309 | hostname: params.hostname,
310 | })
311 | },
312 | )
313 |
314 | const server = setupServer(mockHandler)
315 |
316 | await server.boundary(async () => {
317 | const request = createMockToolRequest('unmute_host', {
318 | hostname: 'test-host',
319 | })
320 | const response = (await toolHandlers.unmute_host(
321 | request,
322 | )) as unknown as DatadogToolResponse
323 |
324 | expect(response.content[0].text).toContain('success')
325 | expect(response.content[0].text).toContain('test-host')
326 | expect(response.content[0].text).toContain('unmuted')
327 | })()
328 |
329 | server.close()
330 | })
331 |
332 | it('should handle host not found', async () => {
333 | const mockHandler = http.post(
334 | `${hostBaseEndpoint}/:hostname/unmute`,
335 | async () => {
336 | return HttpResponse.json(
337 | { errors: ['Host not found'] },
338 | { status: 404 },
339 | )
340 | },
341 | )
342 |
343 | const server = setupServer(mockHandler)
344 |
345 | await server.boundary(async () => {
346 | const request = createMockToolRequest('unmute_host', {
347 | hostname: 'non-existent-host',
348 | })
349 | await expect(toolHandlers.unmute_host(request)).rejects.toThrow(
350 | 'Host not found',
351 | )
352 | })()
353 |
354 | server.close()
355 | })
356 |
357 | it('should handle host already unmuted', async () => {
358 | const mockHandler = http.post(
359 | `${hostBaseEndpoint}/:hostname/unmute`,
360 | async () => {
361 | return HttpResponse.json(
362 | { errors: ['Host is not muted'] },
363 | { status: 400 },
364 | )
365 | },
366 | )
367 |
368 | const server = setupServer(mockHandler)
369 |
370 | await server.boundary(async () => {
371 | const request = createMockToolRequest('unmute_host', {
372 | hostname: 'already-unmuted-host',
373 | })
374 | await expect(toolHandlers.unmute_host(request)).rejects.toThrow(
375 | 'Host is not muted',
376 | )
377 | })()
378 |
379 | server.close()
380 | })
381 | })
382 | })
383 |
```
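Similarly, the `list_hosts` tests above check only the rendered host names, not the query string the tool produced from `filter`, `sort_field`, and friends. msw exposes the intercepted request URL, so capturing the search parameters is straightforward; a sketch, again with the endpoint hard-coded as an assumption:

```typescript
import { http, HttpResponse } from 'msw'

// Assumed literal; the suites above build this from the shared baseUrl helper.
const hostsBaseEndpoint = 'https://api.datadoghq.com/api/v1/hosts'

// Returns a handler plus an accessor for the query parameters it received.
export function captureListHostsQuery() {
  let received: Record<string, string> = {}
  const handler = http.get(hostsBaseEndpoint, ({ request }) => {
    received = Object.fromEntries(new URL(request.url).searchParams)
    return HttpResponse.json({
      host_list: [],
      total_matching: 0,
      total_returned: 0,
    })
  })
  return { handler, params: () => received }
}

// Usage inside a test:
//   const { handler, params } = captureListHostsQuery()
//   ...call list_hosts inside server.boundary()...
//   expect(params()).toMatchObject({ filter: 'env:production' })
```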
--------------------------------------------------------------------------------
/tests/tools/logs.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createLogsToolHandlers } from '../../src/tools/logs/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const logsEndpoint = `${baseUrl}/v2/logs/events/search`
11 |
12 | describe('Logs Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v2.LogsApi(datadogConfig)
24 | const toolHandlers = createLogsToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/logs/#search-logs
27 | describe.concurrent('get_logs', async () => {
28 | it('should retrieve logs', async () => {
29 | // Mock API response based on Datadog API documentation
30 | const mockHandler = http.post(logsEndpoint, async () => {
31 | return HttpResponse.json({
32 | data: [
33 | {
34 | id: 'AAAAAXGLdD0AAABPV-5whqgB',
35 | attributes: {
36 | timestamp: 1640995199999,
37 | status: 'info',
38 | message: 'Test log message',
39 | service: 'test-service',
40 | tags: ['env:test'],
41 | },
42 | type: 'log',
43 | },
44 | ],
45 | meta: {
46 | page: {
47 | after:
48 | 'eyJzdGFydEF0IjoiQVFBQUFYR0xkRDBBQUFCUFYtNXdocWdCIiwiaW5kZXgiOiJtYWluIn0=',
49 | },
50 | },
51 | })
52 | })
53 |
54 | const server = setupServer(mockHandler)
55 |
56 | await server.boundary(async () => {
57 | const request = createMockToolRequest('get_logs', {
58 | query: 'service:test-service',
59 | from: 1640995100, // epoch seconds
60 | to: 1640995200, // epoch seconds
61 | limit: 10,
62 | })
63 | const response = (await toolHandlers.get_logs(
64 | request,
65 | )) as unknown as DatadogToolResponse
66 | expect(response.content[0].text).toContain('Logs data')
67 | expect(response.content[0].text).toContain('Test log message')
68 | })()
69 |
70 | server.close()
71 | })
72 |
73 | it('should handle empty response', async () => {
74 | const mockHandler = http.post(logsEndpoint, async () => {
75 | return HttpResponse.json({
76 | data: [],
77 | meta: {
78 | page: {},
79 | },
80 | })
81 | })
82 |
83 | const server = setupServer(mockHandler)
84 |
85 | await server.boundary(async () => {
86 | const request = createMockToolRequest('get_logs', {
87 | query: 'service:non-existent',
88 | from: 1640995100,
89 | to: 1640995200,
90 | })
91 | const response = (await toolHandlers.get_logs(
92 | request,
93 | )) as unknown as DatadogToolResponse
94 | expect(response.content[0].text).toContain('Logs data')
95 | expect(response.content[0].text).toContain('[]')
96 | })()
97 |
98 | server.close()
99 | })
100 |
101 | it('should handle null response data', async () => {
102 | const mockHandler = http.post(logsEndpoint, async () => {
103 | return HttpResponse.json({
104 | data: null,
105 | meta: {
106 | page: {},
107 | },
108 | })
109 | })
110 |
111 | const server = setupServer(mockHandler)
112 |
113 | await server.boundary(async () => {
114 | const request = createMockToolRequest('get_logs', {
115 | query: 'service:test',
116 | from: 1640995100,
117 | to: 1640995200,
118 | })
119 | await expect(toolHandlers.get_logs(request)).rejects.toThrow(
120 | 'No logs data returned',
121 | )
122 | })()
123 |
124 | server.close()
125 | })
126 |
127 | it('should handle authentication errors', async () => {
128 | const mockHandler = http.post(logsEndpoint, async () => {
129 | return HttpResponse.json(
130 | { errors: ['Authentication failed'] },
131 | { status: 403 },
132 | )
133 | })
134 |
135 | const server = setupServer(mockHandler)
136 |
137 | await server.boundary(async () => {
138 | const request = createMockToolRequest('get_logs', {
139 | query: 'service:test',
140 | from: 1640995100,
141 | to: 1640995200,
142 | })
143 | await expect(toolHandlers.get_logs(request)).rejects.toThrow()
144 | })()
145 |
146 | server.close()
147 | })
148 |
149 | it('should handle rate limit errors', async () => {
150 | const mockHandler = http.post(logsEndpoint, async () => {
151 | return HttpResponse.json(
152 | { errors: ['Rate limit exceeded'] },
153 | { status: 429 },
154 | )
155 | })
156 |
157 | const server = setupServer(mockHandler)
158 |
159 | await server.boundary(async () => {
160 | const request = createMockToolRequest('get_logs', {
161 | query: 'service:test',
162 | from: 1640995100,
163 | to: 1640995200,
164 | })
165 | await expect(toolHandlers.get_logs(request)).rejects.toThrow(
166 | 'Rate limit exceeded',
167 | )
168 | })()
169 |
170 | server.close()
171 | })
172 |
173 | it('should handle server errors', async () => {
174 | const mockHandler = http.post(logsEndpoint, async () => {
175 | return HttpResponse.json(
176 | { errors: ['Internal server error'] },
177 | { status: 500 },
178 | )
179 | })
180 |
181 | const server = setupServer(mockHandler)
182 |
183 | await server.boundary(async () => {
184 | const request = createMockToolRequest('get_logs', {
185 | query: 'service:test',
186 | from: 1640995100,
187 | to: 1640995200,
188 | })
189 | await expect(toolHandlers.get_logs(request)).rejects.toThrow(
190 | 'Internal server error',
191 | )
192 | })()
193 |
194 | server.close()
195 | })
196 | })
197 |
198 | describe.concurrent('get_all_services', async () => {
199 | it('should extract unique service names from logs', async () => {
200 | // Mock API response with multiple services
201 | const mockHandler = http.post(logsEndpoint, async () => {
202 | return HttpResponse.json({
203 | data: [
204 | {
205 | id: 'AAAAAXGLdD0AAABPV-5whqgB',
206 | attributes: {
207 | timestamp: 1640995199000,
208 | status: 'info',
209 | message: 'Test log message 1',
210 | service: 'web-service',
211 | tags: ['env:test'],
212 | },
213 | type: 'log',
214 | },
215 | {
216 | id: 'AAAAAXGLdD0AAABPV-5whqgC',
217 | attributes: {
218 | timestamp: 1640995198000,
219 | status: 'info',
220 | message: 'Test log message 2',
221 | service: 'api-service',
222 | tags: ['env:test'],
223 | },
224 | type: 'log',
225 | },
226 | {
227 | id: 'AAAAAXGLdD0AAABPV-5whqgD',
228 | attributes: {
229 | timestamp: 1640995197000,
230 | status: 'info',
231 | message: 'Test log message 3',
232 | service: 'web-service', // Duplicate service to test uniqueness
233 | tags: ['env:test'],
234 | },
235 | type: 'log',
236 | },
237 | {
238 | id: 'AAAAAXGLdD0AAABPV-5whqgE',
239 | attributes: {
240 | timestamp: 1640995196000,
241 | status: 'error',
242 | message: 'Test error message',
243 | service: 'database-service',
244 | tags: ['env:test'],
245 | },
246 | type: 'log',
247 | },
248 | ],
249 | meta: {
250 | page: {},
251 | },
252 | })
253 | })
254 |
255 | const server = setupServer(mockHandler)
256 |
257 | await server.boundary(async () => {
258 | const request = createMockToolRequest('get_all_services', {
259 | query: '*',
260 | from: 1640995100, // epoch seconds
261 | to: 1640995200, // epoch seconds
262 | limit: 100,
263 | })
264 | const response = (await toolHandlers.get_all_services(
265 | request,
266 | )) as unknown as DatadogToolResponse
267 |
268 | expect(response.content[0].text).toContain('Services')
269 | // Check if response contains the expected services (sorted alphabetically)
270 | const expected = ['api-service', 'database-service', 'web-service']
271 | expected.forEach((service) => {
272 | expect(response.content[0].text).toContain(service)
273 | })
274 |
275 | // Check that we've extracted unique services (no duplicates)
276 | const servicesText = response.content[0].text
277 | const servicesJson = JSON.parse(
278 | servicesText.substring(
279 | servicesText.indexOf('['),
280 | servicesText.lastIndexOf(']') + 1,
281 | ),
282 | )
283 | expect(servicesJson).toHaveLength(3) // Only 3 unique services, not 4
284 | expect(servicesJson).toEqual(expected)
285 | })()
286 |
287 | server.close()
288 | })
289 |
290 | it('should handle logs with missing service attributes', async () => {
291 | const mockHandler = http.post(logsEndpoint, async () => {
292 | return HttpResponse.json({
293 | data: [
294 | {
295 | id: 'AAAAAXGLdD0AAABPV-5whqgB',
296 | attributes: {
297 | timestamp: 1640995199000,
298 | status: 'info',
299 | message: 'Test log message 1',
300 | service: 'web-service',
301 | tags: ['env:test'],
302 | },
303 | type: 'log',
304 | },
305 | {
306 | id: 'AAAAAXGLdD0AAABPV-5whqgC',
307 | attributes: {
308 | timestamp: 1640995198000,
309 | status: 'info',
310 | message: 'Test log message with no service',
311 | // No service attribute
312 | tags: ['env:test'],
313 | },
314 | type: 'log',
315 | },
316 | ],
317 | meta: {
318 | page: {},
319 | },
320 | })
321 | })
322 |
323 | const server = setupServer(mockHandler)
324 |
325 | await server.boundary(async () => {
326 | const request = createMockToolRequest('get_all_services', {
327 | query: '*',
328 | from: 1640995100,
329 | to: 1640995200,
330 | limit: 100,
331 | })
332 | const response = (await toolHandlers.get_all_services(
333 | request,
334 | )) as unknown as DatadogToolResponse
335 |
336 | expect(response.content[0].text).toContain('Services')
337 | expect(response.content[0].text).toContain('web-service')
338 |
339 | // Ensure we only have one service (the one with a defined service attribute)
340 | const servicesText = response.content[0].text
341 | const servicesJson = JSON.parse(
342 | servicesText.substring(
343 | servicesText.indexOf('['),
344 | servicesText.lastIndexOf(']') + 1,
345 | ),
346 | )
347 | expect(servicesJson).toHaveLength(1)
348 | })()
349 |
350 | server.close()
351 | })
352 |
353 | it('should handle empty response data', async () => {
354 | const mockHandler = http.post(logsEndpoint, async () => {
355 | return HttpResponse.json({
356 | data: [],
357 | meta: {
358 | page: {},
359 | },
360 | })
361 | })
362 |
363 | const server = setupServer(mockHandler)
364 |
365 | await server.boundary(async () => {
366 | const request = createMockToolRequest('get_all_services', {
367 | query: 'service:non-existent',
368 | from: 1640995100,
369 | to: 1640995200,
370 | limit: 100,
371 | })
372 | const response = (await toolHandlers.get_all_services(
373 | request,
374 | )) as unknown as DatadogToolResponse
375 |
376 | expect(response.content[0].text).toContain('Services')
377 | expect(response.content[0].text).toContain('[]') // Empty array of services
378 | })()
379 |
380 | server.close()
381 | })
382 | })
383 | })
384 |
```
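The `get_all_services` assertions above pull the service array back out of the response text with `indexOf('[')` / `lastIndexOf(']')` plus `JSON.parse`. If more suites start doing that, the extraction could live in a small helper; a hypothetical sketch, not present in the repository:

```typescript
// Hypothetical helper mirroring the inline substring/JSON.parse logic above:
// pulls the first JSON array out of a tool response string such as
// 'Services: ["api-service","web-service"]'.
export function extractJsonArray(text: string): unknown[] {
  const start = text.indexOf('[')
  const end = text.lastIndexOf(']')
  if (start === -1 || end === -1 || end < start) {
    throw new Error(`no JSON array found in: ${text}`)
  }
  return JSON.parse(text.slice(start, end + 1)) as unknown[]
}

// Usage in the uniqueness test:
//   const services = extractJsonArray(response.content[0].text)
//   expect(services).toEqual(['api-service', 'database-service', 'web-service'])
```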