This is page 3 of 3. Use http://codebase.md/metoro-io/metoro-mcp-server?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .github
│ └── workflows
│ ├── go-test.yml
│ └── release.yml
├── .gitignore
├── .goreleaser.yml
├── go.mod
├── go.sum
├── images
│ └── Metoro_square.svg
├── LICENSE
├── main.go
├── model
│ ├── model_action_email_destination.go
│ ├── model_action_pager_duty_destination.go
│ ├── model_action_slack_destination.go
│ ├── model_action_webhook_destination.go
│ ├── model_action.go
│ ├── model_alert_type.go
│ ├── model_alert.go
│ ├── model_condition_type.go
│ ├── model_condition.go
│ ├── model_create_update_alert_request.go
│ ├── model_expression_config_filters_inner.go
│ ├── model_expression_config_metoro_timeseries_definition.go
│ ├── model_expression_config.go
│ ├── model_metadata_object.go
│ ├── model_metoro_ql_timeseries.go
│ ├── model_operator_config.go
│ ├── model_operator_type.go
│ ├── model_persistence_settings.go
│ ├── model_static_condition.go
│ ├── model_timeseries_config_expression.go
│ ├── model_timeseries_config.go
│ ├── model_timeseries_specifier_filters_inner.go
│ ├── model_timeseries_specifier_function.go
│ ├── model_timeseries_specifier_functions_math_expression.go
│ ├── model_timeseries_specifier_functions.go
│ ├── model_timeseries_specifier_kubernetes_resource.go
│ ├── model_timeseries_specifier_logs.go
│ ├── model_timeseries_specifier_metric.go
│ ├── model_timeseries_specifier_traces.go
│ ├── model_timeseries_specifier.go
│ ├── model.go
│ └── utils.go
├── README.md
├── resources
│ ├── environments.go
│ ├── k8s_events_attributes.go
│ ├── log_attributes.go
│ ├── metrics.go
│ ├── namespaces.go
│ ├── nodes.go
│ ├── resources.go
│ ├── services.go
│ └── trace_attributes.go
├── tools
│ ├── create_ai_issue.go
│ ├── create_alert.go
│ ├── create_dashboard.go
│ ├── create_investigation.go
│ ├── get_ai_issue.go
│ ├── get_alert_fires.go
│ ├── get_alerts.go
│ ├── get_attribute_keys.go
│ ├── get_attribute_values.go
│ ├── get_environments.go
│ ├── get_k8s_event_attribute_values.go
│ ├── get_k8s_events_attributes.go
│ ├── get_k8s_events_volume.go
│ ├── get_k8s_events.go
│ ├── get_k8s_service_information.go
│ ├── get_log_attribute_values.go
│ ├── get_log_attributes.go
│ ├── get_logs.go
│ ├── get_metric_attributes.go
│ ├── get_metric_metadata.go
│ ├── get_metric_names.go
│ ├── get_multi_metric.go
│ ├── get_namespaces.go
│ ├── get_node_attributes.go
│ ├── get_node_info.go
│ ├── get_nodes.go
│ ├── get_pod_by_ip.go
│ ├── get_pods.go
│ ├── get_profiles.go
│ ├── get_service_graph.go
│ ├── get_service_summaries.go
│ ├── get_services.go
│ ├── get_source_repository.go
│ ├── get_trace_attribute_values.go
│ ├── get_trace_attributes.go
│ ├── get_trace_metric.go
│ ├── get_trace_spans.go
│ ├── get_traces_distribution.go
│ ├── get_traces.go
│ ├── get_version_for_service.go
│ ├── list_ai_issue_events.go
│ ├── list_ai_issues.go
│ ├── list_investigations.go
│ ├── tools.go
│ ├── unix_to_rfc3339.go
│ ├── update_ai_issue.go
│ └── update_investigation.go
└── utils
├── request_utils.go
├── time_utils_test.go
└── time_utils.go
```
# Files
--------------------------------------------------------------------------------
/tools/get_multi_metric.go:
--------------------------------------------------------------------------------
```go
1 | package tools
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "slices"
9 | "strings"
10 |
11 | "github.com/metoro-io/metoro-mcp-server/model"
12 | "github.com/metoro-io/metoro-mcp-server/utils"
13 |
14 | mcpgolang "github.com/metoro-io/mcp-golang"
15 | )
16 |
17 | // GetMultiMetricHandlerArgs are the tool arguments for the get_multi_metric
17 | // tool: a time range plus one or more timeseries definitions and optional
17 | // formulas combining them. The jsonschema descriptions are surfaced to the
17 | // LLM client, so they double as user-facing documentation.
17 | type GetMultiMetricHandlerArgs struct {
18 | 	// Time window (relative or absolute) that every timeseries is evaluated over.
18 | 	TimeConfig utils.TimeConfig `json:"time_config" jsonschema:"required,description=The time period to get the timeseries data for. e.g. if you want to get the timeseries data for the last 5 minutes you would set time_period=5 and time_window=Minutes. You can also set an absoulute time range by setting start_time and end_time"`
19 | 	// One entry per requested timeseries; formulas reference them by formulaIdentifier.
19 | 	Timeseries []model.SingleTimeseriesRequest `json:"timeseries" jsonschema:"required,description=Array of timeseries data to get. Each item in this array corresponds to a single timeseries. You can then use the formulas to combine these timeseries. If you only want to see the combination of timeseries via defining formulas and if you dont want to see the individual timeseries data when setting formulas you can set shouldNotReturn to true"`
20 | 	// Optional arithmetic/logical combinations of the timeseries above.
20 | 	Formulas []model.Formula `json:"formulas" jsonschema:"description=Optional formulas to combine timeseries. Formula should only consist of formulaIdentifier of the timeseries in the timeseries array. e.g. a + b + c if a b c appears in the formulaIdentifier of the timeseries array. You can ONLY do the following operations: Arithmetic operations:+ (for add) - (for substract) * (for multiply) / (for division) % (for modulus) ^ or ** (for exponent). Comparison: == != < > <= >= . Logical:! (for not) && (for AND) || (for OR). Conditional operations: ?: (ternary) e.g. (a || b) ? 1 : 0. Do not guess the operations. Just use these available ones!"`
21 | }
22 |
23 | func GetMultiMetricHandler(ctx context.Context, arguments GetMultiMetricHandlerArgs) (*mcpgolang.ToolResponse, error) {
24 | startTime, endTime, err := utils.CalculateTimeRange(arguments.TimeConfig)
25 | if err != nil {
26 | return nil, fmt.Errorf("error calculating time range: %v", err)
27 | }
28 |
29 | err = checkTimeseries(ctx, arguments.Timeseries, startTime, endTime)
30 | if err != nil {
31 | return nil, err
32 | }
33 |
34 | request := model.GetMultiMetricRequest{
35 | StartTime: startTime,
36 | EndTime: endTime,
37 | Metrics: convertTimeseriesToAPITimeseries(arguments.Timeseries, startTime, endTime),
38 | Formulas: arguments.Formulas,
39 | }
40 |
41 | if len(arguments.Timeseries) == 0 {
42 | return nil, fmt.Errorf("no timeseries data provided")
43 | }
44 |
45 | body, err := getMultiMetricMetoroCall(ctx, request)
46 | if err != nil {
47 | return nil, fmt.Errorf("error getting metric: %v", err)
48 | }
49 | return mcpgolang.NewToolResponse(mcpgolang.NewTextContent(fmt.Sprintf("%s", string(body)))), nil
50 | }
51 |
52 | func getMultiMetricMetoroCall(ctx context.Context, request model.GetMultiMetricRequest) ([]byte, error) {
53 | requestBody, err := json.Marshal(request)
54 | if err != nil {
55 | return nil, fmt.Errorf("error marshaling metric request: %v", err)
56 | }
57 | return utils.MakeMetoroAPIRequest("POST", "metrics", bytes.NewBuffer(requestBody), utils.GetAPIRequirementsFromRequest(ctx))
58 | }
59 |
60 | func convertTimeseriesToAPITimeseries(timeseries []model.SingleTimeseriesRequest, startTime int64, endTime int64) []model.SingleMetricRequest {
61 | result := make([]model.SingleMetricRequest, len(timeseries))
62 |
63 | for i, ts := range timeseries {
64 | apiRequest := model.SingleMetricRequest{
65 | Type: string(ts.Type),
66 | ShouldNotReturn: ts.ShouldNotReturn,
67 | FormulaIdentifier: ts.FormulaIdentifier,
68 | }
69 |
70 | switch ts.Type {
71 | case model.Metric:
72 | apiRequest.Metric = &model.GetMetricRequest{
73 | StartTime: startTime,
74 | EndTime: endTime,
75 | MetricName: ts.MetricName,
76 | Filters: ts.Filters,
77 | ExcludeFilters: ts.ExcludeFilters,
78 | Splits: ts.Splits,
79 | Aggregation: ts.Aggregation,
80 | Functions: ts.Functions,
81 | //Functions: ts.Metric.Functions,
82 | //LimitResults: ts.Metric.LimitResults,
83 | BucketSize: ts.BucketSize,
84 | }
85 |
86 | case model.Trace:
87 | apiRequest.Trace = &model.GetTraceMetricRequest{
88 | StartTime: startTime,
89 | EndTime: endTime,
90 | Filters: ts.Filters,
91 | ExcludeFilters: ts.ExcludeFilters,
92 | Splits: ts.Splits,
93 | Aggregate: ts.Aggregation,
94 | BucketSize: ts.BucketSize,
95 | Functions: ts.Functions,
96 | //ServiceNames: ts.ServiceNames,
97 | //Regexes: ts.Regexes,
98 | //ExcludeRegexes: ts.ExcludeRegexes,
99 | //Environments: ts.Environments,
100 | //LimitResults: ts.LimitResults,
101 | //
102 | }
103 |
104 | case model.Logs:
105 | apiRequest.Logs = &model.GetLogMetricRequest{
106 | GetLogsRequest: model.GetLogsRequest{
107 | StartTime: startTime,
108 | EndTime: endTime,
109 | Filters: ts.Filters,
110 | ExcludeFilters: ts.ExcludeFilters,
111 | Regexes: ts.Regexes,
112 | ExcludeRegexes: ts.ExcludeRegexes,
113 | //Environments: ts.Environments,
114 | },
115 | Functions: ts.Functions,
116 | Splits: ts.Splits,
117 | BucketSize: ts.BucketSize,
118 | //Functions: ts.Functions,
119 | }
120 | case model.KubernetesResource:
121 | apiRequest.KubernetesResource = &model.GetKubernetesResourceRequest{
122 | StartTime: startTime,
123 | EndTime: endTime,
124 | Filters: ts.Filters,
125 | ExcludeFilters: ts.ExcludeFilters,
126 | Splits: ts.Splits,
127 | BucketSize: ts.BucketSize,
128 | Functions: ts.Functions,
129 | JsonPath: ts.JsonPath,
130 | Aggregation: ts.Aggregation,
131 | }
132 | }
133 | result[i] = apiRequest
134 | }
135 |
136 | return result
137 | }
138 |
139 | func CheckAttributes(ctx context.Context, requestType model.MetricType, filters map[string][]string, excludeFilters map[string][]string, splits []string, metricRequest *model.GetMetricAttributesRequest) error {
140 | // Check whether the attributes given are valid.
141 | request := model.MultiMetricAttributeKeysRequest{
142 | Type: string(requestType),
143 | Metric: metricRequest,
144 | }
145 | jsonBody, err := json.Marshal(request)
146 | if err != nil {
147 | return fmt.Errorf("error marshaling request: %v", err)
148 | }
149 |
150 | attributeResp, err := utils.MakeMetoroAPIRequest("POST", "metrics/attributes", bytes.NewBuffer(jsonBody), utils.GetAPIRequirementsFromRequest(ctx))
151 | if err != nil {
152 | return fmt.Errorf("error making Metoro call: %v", err)
153 | }
154 |
155 | attributeKeys := model.GetAttributeKeysResponse{}
156 | err = json.Unmarshal(attributeResp, &attributeKeys)
157 | if err != nil {
158 | return fmt.Errorf("error unmarshaling response: %v", err)
159 | }
160 |
161 | attributesAsString := strings.Join(attributeKeys.Attributes, ", ")
162 |
163 | // Check whether the filters given are valid.
164 | for key, _ := range filters {
165 | if !slices.Contains(attributeKeys.Attributes, key) {
166 | return fmt.Errorf("invalid filter key: %s. Valid filter keys are: %s. Please try again with a valid key", key, attributesAsString)
167 | }
168 | }
169 |
170 | for key, _ := range excludeFilters {
171 | if !slices.Contains(attributeKeys.Attributes, key) {
172 | return fmt.Errorf("invalid exclude filter key: %s. Valid keys are: %s. Please try again with a valid key", key, attributesAsString)
173 | }
174 | }
175 |
176 | for _, split := range splits {
177 | if !slices.Contains(attributeKeys.Attributes, split) {
178 | return fmt.Errorf("invalid split key: %s. Valid keys are: %s. Please try again with a valid key", split, attributesAsString)
179 | }
180 | }
181 | return nil
182 | }
183 |
184 | func checkTimeseries(ctx context.Context, timeseries []model.SingleTimeseriesRequest, startTime, endTime int64) error {
185 | for _, ts := range timeseries {
186 | switch ts.Type {
187 | case model.Metric:
188 | err := CheckMetric(ctx, ts.MetricName)
189 | if err != nil {
190 | return err
191 | }
192 | err = CheckAttributes(ctx, ts.Type, ts.Filters, ts.ExcludeFilters, ts.Splits, &model.GetMetricAttributesRequest{
193 | StartTime: startTime,
194 | EndTime: endTime,
195 | MetricName: ts.MetricName,
196 | })
197 | if err != nil {
198 | return err
199 | }
200 | case model.Trace:
201 | err := CheckAttributes(ctx, ts.Type, ts.Filters, ts.ExcludeFilters, ts.Splits, nil)
202 | if err != nil {
203 | return err
204 | }
205 | case model.Logs:
206 | err := CheckAttributes(ctx, ts.Type, ts.Filters, ts.ExcludeFilters, ts.Splits, nil)
207 | if err != nil {
208 | return err
209 | }
210 | }
211 | }
212 | return nil
213 | }
214 |
```
--------------------------------------------------------------------------------
/model/model_action.go:
--------------------------------------------------------------------------------
```go
1 | /*
2 | Metoro API
3 |
4 | API for managing Metoro environments, alerts, and dashboards.
5 |
6 | API version: 1.0.0
7 | */
8 |
9 | // Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
10 |
11 | package model
12 |
13 | import (
14 | "bytes"
15 | "encoding/json"
16 | "fmt"
17 | )
18 |
// checks if the Action type satisfies the MappedNullable interface at compile time
var _ MappedNullable = &Action{}

// Action struct for Action
//
// Represents an alert notification destination. Type names the destination
// kind; presumably exactly one of the matching *Destination pointers is set
// per action — confirm against the OpenAPI spec.
// NOTE(review): this file is generated by OpenAPI Generator (see header);
// prefer regenerating from the spec over hand-editing.
type Action struct {
	// Type of action destination
	Type string `json:"type"`
	SlackDestination *ActionSlackDestination `json:"slackDestination,omitempty"`
	PagerDutyDestination *ActionPagerDutyDestination `json:"pagerDutyDestination,omitempty"`
	EmailDestination *ActionEmailDestination `json:"emailDestination,omitempty"`
	WebhookDestination *ActionWebhookDestination `json:"webhookDestination,omitempty"`
}

// _Action mirrors Action without its methods so UnmarshalJSON can decode into
// it without recursing back into Action.UnmarshalJSON.
type _Action Action
33 |
// NewAction instantiates a new Action object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewAction(type_ string) *Action {
	this := Action{}
	this.Type = type_
	return &this
}

// NewActionWithDefaults instantiates a new Action object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewActionWithDefaults() *Action {
	this := Action{}
	return &this
}

// GetType returns the Type field value
func (o *Action) GetType() string {
	if o == nil {
		// Nil receiver: return the zero value rather than panicking.
		var ret string
		return ret
	}

	return o.Type
}

// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *Action) GetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Type, true
}

// SetType sets field value
func (o *Action) SetType(v string) {
	o.Type = v
}
75 |
// The four optional destination fields below all follow the generator's
// standard optional-field accessor pattern: GetX returns the zero value when
// unset, GetXOk reports presence, HasX tests presence, and SetX stores the
// address of a copy of the supplied value.

// GetSlackDestination returns the SlackDestination field value if set, zero value otherwise.
func (o *Action) GetSlackDestination() ActionSlackDestination {
	if o == nil || IsNil(o.SlackDestination) {
		var ret ActionSlackDestination
		return ret
	}
	return *o.SlackDestination
}

// GetSlackDestinationOk returns a tuple with the SlackDestination field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Action) GetSlackDestinationOk() (*ActionSlackDestination, bool) {
	if o == nil || IsNil(o.SlackDestination) {
		return nil, false
	}
	return o.SlackDestination, true
}

// HasSlackDestination returns a boolean if a field has been set.
func (o *Action) HasSlackDestination() bool {
	if o != nil && !IsNil(o.SlackDestination) {
		return true
	}

	return false
}

// SetSlackDestination gets a reference to the given ActionSlackDestination and assigns it to the SlackDestination field.
func (o *Action) SetSlackDestination(v ActionSlackDestination) {
	o.SlackDestination = &v
}

// GetPagerDutyDestination returns the PagerDutyDestination field value if set, zero value otherwise.
func (o *Action) GetPagerDutyDestination() ActionPagerDutyDestination {
	if o == nil || IsNil(o.PagerDutyDestination) {
		var ret ActionPagerDutyDestination
		return ret
	}
	return *o.PagerDutyDestination
}

// GetPagerDutyDestinationOk returns a tuple with the PagerDutyDestination field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Action) GetPagerDutyDestinationOk() (*ActionPagerDutyDestination, bool) {
	if o == nil || IsNil(o.PagerDutyDestination) {
		return nil, false
	}
	return o.PagerDutyDestination, true
}

// HasPagerDutyDestination returns a boolean if a field has been set.
func (o *Action) HasPagerDutyDestination() bool {
	if o != nil && !IsNil(o.PagerDutyDestination) {
		return true
	}

	return false
}

// SetPagerDutyDestination gets a reference to the given ActionPagerDutyDestination and assigns it to the PagerDutyDestination field.
func (o *Action) SetPagerDutyDestination(v ActionPagerDutyDestination) {
	o.PagerDutyDestination = &v
}

// GetEmailDestination returns the EmailDestination field value if set, zero value otherwise.
func (o *Action) GetEmailDestination() ActionEmailDestination {
	if o == nil || IsNil(o.EmailDestination) {
		var ret ActionEmailDestination
		return ret
	}
	return *o.EmailDestination
}

// GetEmailDestinationOk returns a tuple with the EmailDestination field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Action) GetEmailDestinationOk() (*ActionEmailDestination, bool) {
	if o == nil || IsNil(o.EmailDestination) {
		return nil, false
	}
	return o.EmailDestination, true
}

// HasEmailDestination returns a boolean if a field has been set.
func (o *Action) HasEmailDestination() bool {
	if o != nil && !IsNil(o.EmailDestination) {
		return true
	}

	return false
}

// SetEmailDestination gets a reference to the given ActionEmailDestination and assigns it to the EmailDestination field.
func (o *Action) SetEmailDestination(v ActionEmailDestination) {
	o.EmailDestination = &v
}

// GetWebhookDestination returns the WebhookDestination field value if set, zero value otherwise.
func (o *Action) GetWebhookDestination() ActionWebhookDestination {
	if o == nil || IsNil(o.WebhookDestination) {
		var ret ActionWebhookDestination
		return ret
	}
	return *o.WebhookDestination
}

// GetWebhookDestinationOk returns a tuple with the WebhookDestination field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Action) GetWebhookDestinationOk() (*ActionWebhookDestination, bool) {
	if o == nil || IsNil(o.WebhookDestination) {
		return nil, false
	}
	return o.WebhookDestination, true
}

// HasWebhookDestination returns a boolean if a field has been set.
func (o *Action) HasWebhookDestination() bool {
	if o != nil && !IsNil(o.WebhookDestination) {
		return true
	}

	return false
}

// SetWebhookDestination gets a reference to the given ActionWebhookDestination and assigns it to the WebhookDestination field.
func (o *Action) SetWebhookDestination(v ActionWebhookDestination) {
	o.WebhookDestination = &v
}
203 |
// MarshalJSON serializes the action via ToMap so unset optional destinations
// are omitted from the output.
func (o Action) MarshalJSON() ([]byte, error) {
	toSerialize, err := o.ToMap()
	if err != nil {
		return []byte{}, err
	}
	return json.Marshal(toSerialize)
}

// ToMap returns the action as a generic map: "type" is always present; each
// destination key is included only when its pointer is non-nil.
func (o Action) ToMap() (map[string]interface{}, error) {
	toSerialize := map[string]interface{}{}
	toSerialize["type"] = o.Type
	if !IsNil(o.SlackDestination) {
		toSerialize["slackDestination"] = o.SlackDestination
	}
	if !IsNil(o.PagerDutyDestination) {
		toSerialize["pagerDutyDestination"] = o.PagerDutyDestination
	}
	if !IsNil(o.EmailDestination) {
		toSerialize["emailDestination"] = o.EmailDestination
	}
	if !IsNil(o.WebhookDestination) {
		toSerialize["webhookDestination"] = o.WebhookDestination
	}
	return toSerialize, nil
}
229 |
// UnmarshalJSON decodes an Action, first verifying required properties are
// present, then rejecting any unknown fields via DisallowUnknownFields.
func (o *Action) UnmarshalJSON(data []byte) (err error) {
	// This validates that all required properties are included in the JSON object
	// by unmarshalling the object into a generic map with string keys and checking
	// that every required field exists as a key in the generic map.
	requiredProperties := []string{
		"type",
	}

	allProperties := make(map[string]interface{})

	err = json.Unmarshal(data, &allProperties)

	if err != nil {
		return err
	}

	for _, requiredProperty := range requiredProperties {
		if _, exists := allProperties[requiredProperty]; !exists {
			return fmt.Errorf("no value given for required property %v", requiredProperty)
		}
	}

	// Decode into the method-less shadow type to avoid recursing into this
	// UnmarshalJSON; unknown keys cause a decode error.
	varAction := _Action{}

	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()
	err = decoder.Decode(&varAction)

	if err != nil {
		return err
	}

	*o = Action(varAction)

	return err
}
266 |
// NullableAction wraps an *Action together with an explicit "set" flag so
// callers can distinguish "absent" from "explicitly null" in JSON payloads.
type NullableAction struct {
	value *Action
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableAction) Get() *Action {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableAction) Set(val *Action) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or unmarshalling) has been called.
func (v NullableAction) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableAction) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableAction returns a wrapper already marked as set.
func NewNullableAction(val *Action) *NullableAction {
	return &NullableAction{value: val, isSet: true}
}

// MarshalJSON emits the wrapped value; a nil value marshals as JSON null.
func (v NullableAction) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableAction) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
302 |
```
--------------------------------------------------------------------------------
/tools/create_alert.go:
--------------------------------------------------------------------------------
```go
1 | package tools
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "time"
9 |
10 | "github.com/google/uuid"
11 | mcpgolang "github.com/metoro-io/mcp-golang"
12 | "github.com/metoro-io/metoro-mcp-server/model"
13 | "github.com/metoro-io/metoro-mcp-server/utils"
14 | )
15 |
// CreateAlertHandlerArgs are the tool arguments for the create_alert tool:
// a name/description, the timeseries (and optional formula) to alarm on, and
// the static-threshold evaluation settings. The jsonschema descriptions are
// surfaced to the LLM client.
type CreateAlertHandlerArgs struct {
	AlertName string `json:"alert_name" jsonschema:"required,description=The name of the alert to create"`
	AlertDescription string `json:"alert_description" jsonschema:"required,description=The description of the alert to create"`
	// One entry per timeseries; the formula references them by formulaIdentifier.
	Timeseries []model.MetricSpecifier `json:"timeseries" jsonschema:"required,description=Array of timeseries data to get. Each item in this array corresponds to a single timeseries. You can then use the formulas to combine these timeseries. If you only want to see the combination of timeseries via defining formulas and if you dont want to see the individual timeseries data when setting formulas you can set shouldNotReturn to true. For each timeseries make sure to set the type."`
	Formula model.Formula `json:"formula" jsonschema:"description=Optional formula to combine timeseries. Formula should only consist of formulaIdentifier of the timeseries in the timeseries array. e.g. a + b + c if a b c appears in the formulaIdentifier of the timeseries array. You can ONLY do the following operations: Arithmetic operations:+ (for add) - (for substract) * (for multiply) / (for division) % (for modulus) ^ or ** (for exponent). Comparison: == != < > <= >= . Logical:! (for not) && (for AND) || (for OR). Conditional operations: ?: (ternary) e.g. (a || b) ? 1 : 0. Do not guess the operations. Just use these available ones!"`
	Condition string `json:"condition" jsonschema:"required,enum=GreaterThan,enum=LessThan,enum=GreaterThanOrEqual,enum=LessThanOrEqual,description=the arithmetic comparison to use to evaluate whether an alert is firing or not. This is used to determine whether the alert should be triggered based on the threshold value."`
	Threshold float64 `json:"threshold" jsonschema:"required,description=The threshold value for the alert. This is the value that will be used together with the the arithmetic condition to see whether the alert should be triggered or not. For example if you set the condition to GreaterThan and the threshold to 100 then the alert will fire if the value of the timeseries is greater than 100."`
	DatapointsToAlarm int64 `json:"datapoints_to_alarm" jsonschema:"required,description=The number of datapoints that need to breach the threshold for the alert to be triggered"`
	EvaluationWindow int64 `json:"evaluation_window" jsonschema:"required,description=The evaluation window in number of datapoints. This is the number of datapoints that will be considered for evaluating the alert condition. For example if you set this to then the last 5 datapoints will be considered for evaluating the alert condition. This is useful for smoothing out spikes in the data and preventing false positives."`
}
26 |
27 | func CreateAlertHandler(ctx context.Context, arguments CreateAlertHandlerArgs) (*mcpgolang.ToolResponse, error) {
28 | alert, err := createAlertFromTimeseries(ctx, arguments.AlertName, arguments.AlertDescription, arguments.Timeseries, arguments.Formula, arguments.Condition, arguments.Threshold, arguments.DatapointsToAlarm, arguments.EvaluationWindow)
29 | if err != nil {
30 | return nil, fmt.Errorf("error creating alert properties: %v", err)
31 | }
32 |
33 | newAlertRequest := model.CreateUpdateAlertRequest{
34 | Alert: alert,
35 | }
36 |
37 | resp, err := setAlertMetoroCall(ctx, newAlertRequest)
38 | if err != nil {
39 | return nil, fmt.Errorf("error setting dashboard: %v", err)
40 | }
41 | return mcpgolang.NewToolResponse(mcpgolang.NewTextContent(fmt.Sprintf("%s", string(resp)))), nil
42 | }
43 |
44 | // TODO: Implement the conversion logic.
45 | func createAlertFromTimeseries(ctx context.Context, alertName, alertDescription string, timeseries []model.MetricSpecifier, formula model.Formula, condition string, threshold float64, datapointsToAlarm int64, evaluationWindow int64) (model.Alert, error) {
46 | // Create dummy time range for the last 10 minutes to validate the timeseries
47 | endTime := time.Now().Unix()
48 | startTime := endTime - 600 // 10 minutes ago
49 |
50 | // Convert MetricSpecifier to SingleTimeseriesRequest for validation
51 | singleTimeseriesRequests := convertMetricSpecifierToSingleTimeseries(timeseries)
52 |
53 | err := checkTimeseries(ctx, singleTimeseriesRequests, startTime, endTime)
54 | if err != nil {
55 | return model.Alert{}, err
56 | }
57 | metoroQlQueries, err := convertMetricSpecifierToMetoroQL(ctx, timeseries, []model.Formula{formula})
58 | if err != nil {
59 | return model.Alert{}, fmt.Errorf("error converting metric specifiers to MetoroQL: %v", err)
60 | }
61 |
62 | // Convert condition string to OperatorType
63 | var operatorType model.OperatorType
64 | switch condition {
65 | case "GreaterThan":
66 | operatorType = model.GREATER_THAN
67 | case "LessThan":
68 | operatorType = model.LESS_THAN
69 | case "GreaterThanOrEqual":
70 | operatorType = model.GREATER_THAN_OR_EQUAL
71 | case "LessThanOrEqual":
72 | operatorType = model.LESS_THAN_OR_EQUAL
73 | default:
74 | return model.Alert{}, fmt.Errorf("invalid condition: %s", condition)
75 | }
76 |
77 | // Determine bucket size from the timeseries
78 | bucketSize := int64(60) // default to 60 seconds
79 | if len(timeseries) > 0 && timeseries[0].BucketSize > 0 {
80 | bucketSize = timeseries[0].BucketSize
81 | }
82 |
83 | // Use the first MetoroQL query (usually the combined formula result)
84 | query := ""
85 | if len(metoroQlQueries) > 0 {
86 | for _, q := range metoroQlQueries {
87 | if q != "" {
88 | query = q
89 | break
90 | }
91 | }
92 | }
93 |
94 | // Create the alert
95 | conditionType := model.STATIC
96 | timeseriesType := model.TIMESERIES
97 | alert := model.Alert{
98 | Metadata: model.MetadataObject{
99 | Name: alertName,
100 | Description: &alertDescription,
101 | Id: uuid.NewString(),
102 | },
103 | Type: ×eriesType,
104 | Timeseries: model.TimeseriesConfig{
105 | Expression: model.ExpressionConfig{
106 | MetoroQLTimeseries: &model.MetoroQlTimeseries{
107 | Query: query,
108 | BucketSize: bucketSize,
109 | },
110 | },
111 | EvaluationRules: []model.Condition{
112 | {
113 | Name: "Alert Condition",
114 | Type: &conditionType,
115 | Static: &model.StaticCondition{
116 | Operators: []model.OperatorConfig{
117 | {
118 | Operator: operatorType,
119 | Threshold: threshold,
120 | },
121 | },
122 | PersistenceSettings: model.PersistenceSettings{
123 | DatapointsToAlarm: datapointsToAlarm,
124 | DatapointsInEvaluationWindow: evaluationWindow,
125 | },
126 | },
127 | },
128 | },
129 | },
130 | }
131 |
132 | return alert, nil
133 | }
134 |
135 | func convertMetricSpecifierToMetoroQL(ctx context.Context, metricSpecs []model.MetricSpecifier, formulas []model.Formula) ([]string, error) {
136 | req := model.MetricSpecifiersRequest{
137 | MetricSpecifiers: metricSpecs,
138 | Formulas: formulas,
139 | }
140 | requestBody, err := json.Marshal(req)
141 | if err != nil {
142 | return nil, fmt.Errorf("error marshaling MetricSpecifiersRequest: %v", err)
143 | }
144 | resp, err := utils.MakeMetoroAPIRequest("POST", "metoroql/convert/metricSpecifierToMetoroql", bytes.NewBuffer(requestBody), utils.GetAPIRequirementsFromRequest(ctx))
145 | if err != nil {
146 | return nil, fmt.Errorf("error making MetoroQL conversion request: %v", err)
147 | }
148 | var metoroQLQueriesResp model.MetricSpecifierToMetoroQLResponse
149 | if err := json.Unmarshal(resp, &metoroQLQueriesResp); err != nil {
150 | return nil, fmt.Errorf("error unmarshaling MetoroQL conversion response: %v", err)
151 | }
152 | if len(metoroQLQueriesResp.Queries) == 0 {
153 | return nil, fmt.Errorf("no MetoroQL queries returned from conversion")
154 | }
155 | return metoroQLQueriesResp.Queries, nil
156 | }
157 |
158 | func setAlertMetoroCall(ctx context.Context, request model.CreateUpdateAlertRequest) ([]byte, error) {
159 | requestBody, err := json.Marshal(request)
160 | if err != nil {
161 | return nil, fmt.Errorf("error marshaling alert request: %v", err)
162 | }
163 | return utils.MakeMetoroAPIRequest("POST", "alerts/update", bytes.NewBuffer(requestBody), utils.GetAPIRequirementsFromRequest(ctx))
164 | }
165 |
166 | // convertMetricSpecifierToSingleTimeseries converts MetricSpecifier to SingleTimeseriesRequest
167 | func convertMetricSpecifierToSingleTimeseries(metricSpecs []model.MetricSpecifier) []model.SingleTimeseriesRequest {
168 | result := make([]model.SingleTimeseriesRequest, len(metricSpecs))
169 | for i, spec := range metricSpecs {
170 | result[i] = model.SingleTimeseriesRequest{
171 | Type: spec.MetricType,
172 | MetricName: spec.MetricName,
173 | Aggregation: spec.Aggregation,
174 | Filters: spec.Filters,
175 | ExcludeFilters: spec.ExcludeFilters,
176 | Splits: spec.Splits,
177 | Regexes: spec.Regexes,
178 | ExcludeRegexes: spec.ExcludeRegexes,
179 | BucketSize: spec.BucketSize,
180 | Functions: spec.Functions,
181 | ShouldNotReturn: spec.ShouldNotReturn,
182 | FormulaIdentifier: "",
183 | }
184 | }
185 | return result
186 | }
187 |
```
--------------------------------------------------------------------------------
/model/model_timeseries_specifier.go:
--------------------------------------------------------------------------------
```go
1 | /*
2 | Metoro Alerts API
3 |
4 | API for managing alerts in the Metoro observability platform.
5 |
6 | API version: 1.0.0
7 | */
8 |
9 | // Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
10 |
11 | package model
12 |
13 | import (
14 | "encoding/json"
15 | "bytes"
16 | "fmt"
17 | )
18 |
// checks if the TimeseriesSpecifier type satisfies the MappedNullable interface at compile time
var _ MappedNullable = &TimeseriesSpecifier{}

// TimeseriesSpecifier Specification for a timeseries
//
// Type selects which of the variant pointers (Metric, KubernetesResource,
// Logs, Traces) is relevant; the remaining fields apply across variants.
// NOTE(review): this file is generated by OpenAPI Generator (see header);
// prefer regenerating from the spec over hand-editing.
type TimeseriesSpecifier struct {
	// Type of timeseries
	Type string `json:"type"`
	Metric *TimeseriesSpecifierMetric `json:"metric,omitempty"`
	KubernetesResource *TimeseriesSpecifierKubernetesResource `json:"kubernetes_resource,omitempty"`
	Logs *TimeseriesSpecifierLogs `json:"logs,omitempty"`
	Traces *TimeseriesSpecifierTraces `json:"traces,omitempty"`
	// Filters to apply to the timeseries
	Filters []TimeseriesSpecifierFiltersInner `json:"filters,omitempty"`
	// Aggregation function to apply
	Aggregation *string `json:"aggregation,omitempty"`
	// Size of buckets in seconds for data aggregation
	BucketSize *int64 `json:"bucketSize,omitempty"`
	// Fields to split the results by
	Splits []string `json:"splits,omitempty"`
	// Functions to apply to the timeseries
	Functions []TimeseriesSpecifierFunction `json:"functions,omitempty"`
}

// _TimeseriesSpecifier mirrors TimeseriesSpecifier without its methods so
// UnmarshalJSON can decode into it without recursing.
type _TimeseriesSpecifier TimeseriesSpecifier
43 |
// NewTimeseriesSpecifier instantiates a new TimeseriesSpecifier object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTimeseriesSpecifier(type_ string) *TimeseriesSpecifier {
	this := TimeseriesSpecifier{}
	this.Type = type_
	return &this
}

// NewTimeseriesSpecifierWithDefaults instantiates a new TimeseriesSpecifier object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// Note: Type is left as "" here; callers are expected to set it, since "type"
// is required when round-tripping through UnmarshalJSON.
func NewTimeseriesSpecifierWithDefaults() *TimeseriesSpecifier {
	this := TimeseriesSpecifier{}
	return &this
}
61 |
// The accessors below follow the standard OpenAPI Generator pattern:
//   GetX   returns the value, or the zero value when unset or the receiver is nil;
//   GetXOk returns (value, true) only when the field is set;
//   HasX   reports whether the optional field is set;
//   SetX   stores the value (taking its address for optional scalar fields).

// GetType returns the Type field value
func (o *TimeseriesSpecifier) GetType() string {
	if o == nil {
		var ret string
		return ret
	}

	return o.Type
}

// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Type, true
}

// SetType sets field value
func (o *TimeseriesSpecifier) SetType(v string) {
	o.Type = v
}

// GetMetric returns the Metric field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetMetric() TimeseriesSpecifierMetric {
	if o == nil || IsNil(o.Metric) {
		var ret TimeseriesSpecifierMetric
		return ret
	}
	return *o.Metric
}

// GetMetricOk returns a tuple with the Metric field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetMetricOk() (*TimeseriesSpecifierMetric, bool) {
	if o == nil || IsNil(o.Metric) {
		return nil, false
	}
	return o.Metric, true
}

// HasMetric returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasMetric() bool {
	if o != nil && !IsNil(o.Metric) {
		return true
	}

	return false
}

// SetMetric gets a reference to the given TimeseriesSpecifierMetric and assigns it to the Metric field.
func (o *TimeseriesSpecifier) SetMetric(v TimeseriesSpecifierMetric) {
	o.Metric = &v
}

// GetKubernetesResource returns the KubernetesResource field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetKubernetesResource() TimeseriesSpecifierKubernetesResource {
	if o == nil || IsNil(o.KubernetesResource) {
		var ret TimeseriesSpecifierKubernetesResource
		return ret
	}
	return *o.KubernetesResource
}

// GetKubernetesResourceOk returns a tuple with the KubernetesResource field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetKubernetesResourceOk() (*TimeseriesSpecifierKubernetesResource, bool) {
	if o == nil || IsNil(o.KubernetesResource) {
		return nil, false
	}
	return o.KubernetesResource, true
}

// HasKubernetesResource returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasKubernetesResource() bool {
	if o != nil && !IsNil(o.KubernetesResource) {
		return true
	}

	return false
}

// SetKubernetesResource gets a reference to the given TimeseriesSpecifierKubernetesResource and assigns it to the KubernetesResource field.
func (o *TimeseriesSpecifier) SetKubernetesResource(v TimeseriesSpecifierKubernetesResource) {
	o.KubernetesResource = &v
}

// GetLogs returns the Logs field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetLogs() TimeseriesSpecifierLogs {
	if o == nil || IsNil(o.Logs) {
		var ret TimeseriesSpecifierLogs
		return ret
	}
	return *o.Logs
}

// GetLogsOk returns a tuple with the Logs field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetLogsOk() (*TimeseriesSpecifierLogs, bool) {
	if o == nil || IsNil(o.Logs) {
		return nil, false
	}
	return o.Logs, true
}

// HasLogs returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasLogs() bool {
	if o != nil && !IsNil(o.Logs) {
		return true
	}

	return false
}

// SetLogs gets a reference to the given TimeseriesSpecifierLogs and assigns it to the Logs field.
func (o *TimeseriesSpecifier) SetLogs(v TimeseriesSpecifierLogs) {
	o.Logs = &v
}

// GetTraces returns the Traces field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetTraces() TimeseriesSpecifierTraces {
	if o == nil || IsNil(o.Traces) {
		var ret TimeseriesSpecifierTraces
		return ret
	}
	return *o.Traces
}

// GetTracesOk returns a tuple with the Traces field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetTracesOk() (*TimeseriesSpecifierTraces, bool) {
	if o == nil || IsNil(o.Traces) {
		return nil, false
	}
	return o.Traces, true
}

// HasTraces returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasTraces() bool {
	if o != nil && !IsNil(o.Traces) {
		return true
	}

	return false
}

// SetTraces gets a reference to the given TimeseriesSpecifierTraces and assigns it to the Traces field.
func (o *TimeseriesSpecifier) SetTraces(v TimeseriesSpecifierTraces) {
	o.Traces = &v
}

// GetFilters returns the Filters field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetFilters() []TimeseriesSpecifierFiltersInner {
	if o == nil || IsNil(o.Filters) {
		var ret []TimeseriesSpecifierFiltersInner
		return ret
	}
	return o.Filters
}

// GetFiltersOk returns a tuple with the Filters field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetFiltersOk() ([]TimeseriesSpecifierFiltersInner, bool) {
	if o == nil || IsNil(o.Filters) {
		return nil, false
	}
	return o.Filters, true
}

// HasFilters returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasFilters() bool {
	if o != nil && !IsNil(o.Filters) {
		return true
	}

	return false
}

// SetFilters gets a reference to the given []TimeseriesSpecifierFiltersInner and assigns it to the Filters field.
func (o *TimeseriesSpecifier) SetFilters(v []TimeseriesSpecifierFiltersInner) {
	o.Filters = v
}

// GetAggregation returns the Aggregation field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetAggregation() string {
	if o == nil || IsNil(o.Aggregation) {
		var ret string
		return ret
	}
	return *o.Aggregation
}

// GetAggregationOk returns a tuple with the Aggregation field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetAggregationOk() (*string, bool) {
	if o == nil || IsNil(o.Aggregation) {
		return nil, false
	}
	return o.Aggregation, true
}

// HasAggregation returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasAggregation() bool {
	if o != nil && !IsNil(o.Aggregation) {
		return true
	}

	return false
}

// SetAggregation gets a reference to the given string and assigns it to the Aggregation field.
func (o *TimeseriesSpecifier) SetAggregation(v string) {
	o.Aggregation = &v
}

// GetBucketSize returns the BucketSize field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetBucketSize() int64 {
	if o == nil || IsNil(o.BucketSize) {
		var ret int64
		return ret
	}
	return *o.BucketSize
}

// GetBucketSizeOk returns a tuple with the BucketSize field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetBucketSizeOk() (*int64, bool) {
	if o == nil || IsNil(o.BucketSize) {
		return nil, false
	}
	return o.BucketSize, true
}

// HasBucketSize returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasBucketSize() bool {
	if o != nil && !IsNil(o.BucketSize) {
		return true
	}

	return false
}

// SetBucketSize gets a reference to the given int64 and assigns it to the BucketSize field.
func (o *TimeseriesSpecifier) SetBucketSize(v int64) {
	o.BucketSize = &v
}

// GetSplits returns the Splits field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetSplits() []string {
	if o == nil || IsNil(o.Splits) {
		var ret []string
		return ret
	}
	return o.Splits
}

// GetSplitsOk returns a tuple with the Splits field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetSplitsOk() ([]string, bool) {
	if o == nil || IsNil(o.Splits) {
		return nil, false
	}
	return o.Splits, true
}

// HasSplits returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasSplits() bool {
	if o != nil && !IsNil(o.Splits) {
		return true
	}

	return false
}

// SetSplits gets a reference to the given []string and assigns it to the Splits field.
func (o *TimeseriesSpecifier) SetSplits(v []string) {
	o.Splits = v
}

// GetFunctions returns the Functions field value if set, zero value otherwise.
func (o *TimeseriesSpecifier) GetFunctions() []TimeseriesSpecifierFunction {
	if o == nil || IsNil(o.Functions) {
		var ret []TimeseriesSpecifierFunction
		return ret
	}
	return o.Functions
}

// GetFunctionsOk returns a tuple with the Functions field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TimeseriesSpecifier) GetFunctionsOk() ([]TimeseriesSpecifierFunction, bool) {
	if o == nil || IsNil(o.Functions) {
		return nil, false
	}
	return o.Functions, true
}

// HasFunctions returns a boolean if a field has been set.
func (o *TimeseriesSpecifier) HasFunctions() bool {
	if o != nil && !IsNil(o.Functions) {
		return true
	}

	return false
}

// SetFunctions gets a reference to the given []TimeseriesSpecifierFunction and assigns it to the Functions field.
func (o *TimeseriesSpecifier) SetFunctions(v []TimeseriesSpecifierFunction) {
	o.Functions = v
}
373 |
// MarshalJSON implements json.Marshaler by delegating to ToMap so that only
// the required "type" key and any set optional fields are serialized.
func (o TimeseriesSpecifier) MarshalJSON() ([]byte, error) {
	toSerialize,err := o.ToMap()
	if err != nil {
		return []byte{}, err
	}
	return json.Marshal(toSerialize)
}
381 |
// ToMap returns a map representation of the specifier suitable for JSON
// serialization. The required "type" key is always emitted; every optional
// field is included only when it is set (non-nil).
func (o TimeseriesSpecifier) ToMap() (map[string]interface{}, error) {
	toSerialize := map[string]interface{}{}
	toSerialize["type"] = o.Type
	if !IsNil(o.Metric) {
		toSerialize["metric"] = o.Metric
	}
	if !IsNil(o.KubernetesResource) {
		toSerialize["kubernetes_resource"] = o.KubernetesResource
	}
	if !IsNil(o.Logs) {
		toSerialize["logs"] = o.Logs
	}
	if !IsNil(o.Traces) {
		toSerialize["traces"] = o.Traces
	}
	if !IsNil(o.Filters) {
		toSerialize["filters"] = o.Filters
	}
	if !IsNil(o.Aggregation) {
		toSerialize["aggregation"] = o.Aggregation
	}
	if !IsNil(o.BucketSize) {
		toSerialize["bucketSize"] = o.BucketSize
	}
	if !IsNil(o.Splits) {
		toSerialize["splits"] = o.Splits
	}
	if !IsNil(o.Functions) {
		toSerialize["functions"] = o.Functions
	}
	return toSerialize, nil
}
414 |
// UnmarshalJSON implements json.Unmarshaler. It first verifies that all
// required properties are present, then strictly decodes the payload
// (unknown JSON keys are rejected).
func (o *TimeseriesSpecifier) UnmarshalJSON(data []byte) (err error) {
	// This validates that all required properties are included in the JSON object
	// by unmarshalling the object into a generic map with string keys and checking
	// that every required field exists as a key in the generic map.
	requiredProperties := []string{
		"type",
	}

	allProperties := make(map[string]interface{})

	err = json.Unmarshal(data, &allProperties)

	if err != nil {
		return err;
	}

	for _, requiredProperty := range(requiredProperties) {
		if _, exists := allProperties[requiredProperty]; !exists {
			return fmt.Errorf("no value given for required property %v", requiredProperty)
		}
	}

	// Decode into the method-less shadow type so this UnmarshalJSON is not
	// invoked recursively.
	varTimeseriesSpecifier := _TimeseriesSpecifier{}

	decoder := json.NewDecoder(bytes.NewReader(data))
	// Strict decoding: any key that does not match a struct field is an error.
	decoder.DisallowUnknownFields()
	err = decoder.Decode(&varTimeseriesSpecifier)

	if err != nil {
		return err
	}

	*o = TimeseriesSpecifier(varTimeseriesSpecifier)

	// err is nil here if decoding succeeded.
	return err
}
451 |
// NullableTimeseriesSpecifier wraps a TimeseriesSpecifier pointer together
// with an explicit "set" flag, so an explicit JSON null can be distinguished
// from an absent value.
type NullableTimeseriesSpecifier struct {
	value *TimeseriesSpecifier
	isSet bool
}

// Get returns the wrapped value; it may be nil.
func (v NullableTimeseriesSpecifier) Get() *TimeseriesSpecifier {
	return v.value
}

// Set stores the value and marks it as set.
func (v *NullableTimeseriesSpecifier) Set(val *TimeseriesSpecifier) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been assigned via Set or UnmarshalJSON.
func (v NullableTimeseriesSpecifier) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableTimeseriesSpecifier) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableTimeseriesSpecifier returns a wrapper that is already marked as set.
func NewNullableTimeseriesSpecifier(val *TimeseriesSpecifier) *NullableTimeseriesSpecifier {
	return &NullableTimeseriesSpecifier{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableTimeseriesSpecifier) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it as set
// (JSON null leaves the value nil but still sets the flag).
func (v *NullableTimeseriesSpecifier) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
487 |
488 |
489 |
```
--------------------------------------------------------------------------------
/tools/tools.go:
--------------------------------------------------------------------------------
```go
1 | package tools
2 |
// MetoroTools describes a single MCP tool exposed by the Metoro MCP server.
type MetoroTools struct {
	Name string // unique tool name advertised to MCP clients
	Description string // model-facing usage guidance for the tool
	Handler any // handler function invoked when the tool is called
}
8 |
9 | var MetoroToolsList = []MetoroTools{
10 | {
11 | Name: "get_environments",
12 | Description: "Get Kubernetes environments/clusters. This tool is useful for listing the kubernetes environments/clusters that are monitored by Metoro.",
13 | Handler: GetEnvironmentsHandler,
14 | },
15 | {
16 | Name: "get_services",
17 | Description: "Get services running in your Kubernetes cluster. Metoro treats the following Kubernetes resources as a 'service': Deployment, StatefulSet, DaemonSet. This tool is useful for listing the services/workloads running in your Kubernetes cluster.",
18 | Handler: GetServicesHandler,
19 | },
20 | {
21 | Name: "get_namespaces",
22 | Description: "Get namespaces in your Kubernetes cluster. This tool is useful for listing the namespaces in your Kubernetes cluster.",
23 | Handler: GetNamespacesHandler,
24 | },
25 | {
26 | Name: "get_logs",
27 | Description: `Get logs from all or specific services/hosts/pods. Results are limited to 20 logs lines. Before using this you MUST first call get_attribute_keys and get_attribute_values to get the possible log attribute keys and values which can be used as Filter/ExcludeFilter keys.`,
28 | Handler: GetLogsHandler,
29 | },
30 | {
31 | Name: "get_traces",
32 | Description: `Get list of traces from your cluster. Results are limited to 20 traces so try to use filters to narrow down what you are looking for.
33 | Prior to using this tool, YOU MUST first call get_attribute_keys and subsequently get_attribute_values to get the possible trace attribute keys and values which can be used as Filter/ExcludeFilter keys.
34 | Use this tool when you are interested in the trace attributes to get more information to answer why/what. If you want more details about a specific trace use get_trace_spans to see individual span details.
35 | If you would like to check existence of traces use get_timeseries_data tool with type=trace to get count/p50/p90/p95/p99 of traces instead of using get_traces tool.
36 | After calling get traces you should normally call get_trace_spans to get the spans associated with the traceId you are interested in. When reading duration of a trace use the durationReadable field.`,
37 | Handler: GetTracesHandler,
38 | },
39 | {
40 | Name: "get_trace_spans",
41 | Description: `Get the spans associated with a specific traceId. This allows you to view the entire trace with all its spans in a tree like structure. You should basically always use this after calling get_traces tool to get the traceId you are interested in. This tool gives you all spans in a trace.`,
42 | Handler: GetTraceSpansHandler,
43 | },
44 | {
45 | Name: "get_traces_distribution",
46 | Description: `Gets the most common attribute - value pairs for the traces matching the filters.
47 | The provided filters are the same as the filters used in the get_traces tool.
48 | You should use this tool to understand if there are any common attributes in traces that you are interested in.
49 | For example if you have some failing traces you should use this tool and it might tell you that they are all on a specific path or coming from a specific pod or environment.
50 | This is a very useful tool and should often be used before calling get_traces.
51 | Prior to using this tool, YOU MUST first call get_attribute_keys and subsequently get_attribute_values to get the possible trace attribute keys and values which can be used as Filter/ExcludeFilter keys.
52 | `,
53 | Handler: GetTracesDistributionHandler,
54 | },
55 | {
56 | Name: "get_timeseries_data",
57 | Description: `Get one or more timeseries data for a metric or traces or logs or kubernetes resources. This tool is useful for understanding how the underlying type of data (specific/metric/trace/kubernetes resources/logs) change over time. You can also apply formulas to combine timeseries to calculate rates or ratios or differences etc. How to use this tool:
58 | First you need the type of timeseries data you are requesting for. This can be one of metric or traces or logs or kubernetes resources. If it is metrics then you HAVE TO call the get_metric_names tool to get the available metric names which can be used as MetricName argument for this tool.
59 | Then YOU HAVE TO call get_attribute_keys tool to retrieve the available attribute keys and get_attribute_values to retrieve values you are interested in to use in Filter/ExcludeFilter keys for this tool.
60 | You can also use Splits argument to group/split the metric data by the given metric attribute keys. Only use the attribute keys and values that are available for the MetricName that are returned from get_attribute_keys and get_attribute_values tools. If you are not getting proper results back then you might have forgotten to set the correct attribute keys and values. Try again with the correct attribute keys and values you get from get_attribute_values.
61 | Metrics of type counter (or with _total suffix) are cumulative metrics but Metoro querying engine already accounts for rate differences when returning the value so you don't need to calculate the rate/monotonic difference yourself. You can just query those metrics as they are without extra functions. If you are in doubt use the get_metric_metadata tool to get more information (description type unit) about the metric and how to use it.
62 | `,
63 | Handler: GetMultiMetricHandler,
64 | },
65 | {
66 | Name: "get_attribute_keys",
67 | Description: `Get the possible attribute keys for a specific type of data. This tool is useful for understanding the possible attribute keys that can be used for filtering the data. How to use this tool:
68 | First you need the type of data you are requesting for. This can be one of metric or traces or logs or kubernetes resources.
69 | Then you can call this tool to get the possible attribute keys for the given type of data.`,
70 | Handler: GetAttributeKeysHandler,
71 | },
72 | {
73 | Name: "get_attribute_values",
74 | Description: `"Get the possible values of an attribute key for a given type of data which can be one of metric trace logs or kubernetes_resource. This can be used as a value for a filtering key for filtering data. How to use this tool:
75 | First you need the type of data you are requesting for. This can be one of metric or traces or logs or kubernetes resources. Then you need the attribute keys for the given type of data. You can use get_attribute_keys tool to get the available attribute keys for the given type of data.
76 | Then you can call this tool to get the possible values for a given attribute key for the given type of data. If you want to get the possible values for a metric attribute key you can use the get_metric_names tool to get the available metric names which can be used as MetricName argument for this tool and then use get_attribute_keys tool to get the available attribute keys and get_attribute_values to get values for the key which can be used as Filter/ExcludeFilter keys for`,
77 | Handler: GetAttributeValuesHandler,
78 | },
79 | {
80 | Name: "get_profiles",
81 | Description: "Get cpu profiles of your services running in your Kubernetes cluster. This tool is useful for answering performance related questions for a specific service. It provides information about which functions taking time in the service.",
82 | Handler: GetProfilesHandler,
83 | },
84 | {
85 | Name: "get_k8s_events",
86 | Description: `Get the Kubernetes events from your clusters. Kubernetes events are useful for understanding what is happening with regards to your Kubernetes resources.
87 | They are emitted by the Kubernetes API server when there is a change in the state of the cluster. How to use this tool:
88 | First use get_k8s_events_attributes tool to retrieve the available Kubernetes event attribute keys which can be used as Filter/ExcludeFilter keys for this tool.
89 | Then use get_k8s_event_attribute_values_for_individual_attribute tool to get the possible values a Kubernetes event attribute key can be for filtering Kubernetes events.
90 | And then you can call this tool (get_k8s_events) to get the specific events you are looking for. e.g. Filter use case: get_k8s_events with filters: {key: [value]} for including specific Kubernetes events.`,
91 | Handler: GetK8sEventsHandler,
92 | },
93 | {
94 | Name: "get_k8s_events_attributes",
95 | Description: "Get possible attribute keys for Kubernetes events which can be used for filtering them.",
96 | Handler: GetK8sEventsAttributesHandler,
97 | },
98 | {
99 | Name: "get_k8s_event_attribute_values_for_individual_attribute",
100 | Description: "Get possible attribute values for a specific Kubernetes event attribute key. E.g. EventType attribute key might have values like Normal Warning etc.",
101 | Handler: GetK8sEventAttributeValuesForIndividualAttributeHandler,
102 | },
103 | {
104 | Name: "get_metric_names",
105 | Description: "Get available metric names to query. These metric names can be used as MetricName argument for get_metric get_metric_metadata and get_timeseries_data and get_attribute_keys tools.",
106 | Handler: GetMetricNamesHandler,
107 | },
108 | {
109 | Name: "get_metric_metadata",
110 | Description: "Get metric description and type and unit for a metric. This tool can be used to get detailed information about a metric including its type unit and description. Use this tool after getting the metric name that you are interested in from the get_metric_names tool and before calling the get_timeseries_data tool to understand the metric better.",
111 | Handler: GetMetricMetadata,
112 | },
113 | //{
114 | // Name: "get_pods",
115 | // Description: "Get the list of pods that are running in your cluster. This tool is useful for getting the name of the pods. You must provide either a ServiceName to get pods for a specific service or a NodeName to get pods running on a specific node.",
116 | // Handler: GetPodsHandler,
117 | //},
118 | {
119 | Name: "get_service_yaml",
120 | Description: "Returns environment and YAML of a kubernetes resource/service. This tool is useful for understanding the YAML configuration of a service.",
121 | Handler: GetK8sServiceInformationHandler,
122 | },
123 | {
124 | Name: "get_version_for_service",
125 | Description: "Get container IDs and their image versions for a specific service. This tool extracts container names and their image versions from the service YAML configuration.",
126 | Handler: GetVersionForServiceHandler,
127 | },
128 | {
129 | Name: "get_nodes",
130 | Description: "Get the nodes that are running in your cluster. To use this tool first call get_node_attributes to get the possible node attribute keys and values which can be used for filtering nodes.",
131 | Handler: GetNodesHandler,
132 | },
133 | {
134 | Name: "get_node_attributes",
135 | Description: "Get possible node attribute keys and values which can be used for filtering nodes.",
136 | Handler: GetNodeAttributesHandler,
137 | },
138 | {
139 | Name: "get_node_info",
140 | Description: "Get detailed node information about a specific node. This tool provides information about the node's capacity allocatable resources and usage yaml node type OS and Kernel information.",
141 | Handler: GetNodeInfoHandler,
142 | },
143 | {
144 | Name: "get_service_summaries",
145 | Description: "Get summaries of services/workloads running in your Kubernetes cluster. The summary includes the number of requests errors (5xx and 4xx) P50 p95 p99 latencies. This tool is useful for understanding the performance of your services at a high level for a given relative or abosulute time range.",
146 | Handler: GetServiceSummariesHandler,
147 | },
148 | {
149 | Name: "get_alerts",
150 | Description: "Get list of alerts from your Kubernetes cluster. These alerts are configured by the user in Metoro therefore it may not have full coverage for all the issues that might occur in the cluster.",
151 | Handler: GetAlertsHandler,
152 | },
153 | {
154 | Name: "get_alert_fires",
155 | Description: "Get list of alert fires from your Kubernetes cluster. Alert fires are the instances when an alert is triggered. This tool provides information about the alert name the time it was triggered the time it recovered the environment and the service name (if available) and the alert trigger message.",
156 | Handler: GetAlertFiresHandler,
157 | },
158 | {
159 | Name: "create_dashboard",
160 | Description: `Create a dashboard with the described metrics. This tool is useful for creating a dashboard with the metrics you are interested in.
161 | How to use this tool:
162 | First use get_metric_names tool to retrieve the available metric names which can be used as MetricName argument for this tool and then use get_attribute_keys tool to retrieve the available attribute keys and get_attribute_values for getting the values for the attribute key that you are interested in to use in Filter/ExcludeFilter keys or Splits argument for MetricChartWidget argument for this tool.
163 | You can also use Splits argument to group the metric data by the given metric attribute keys. Only use the attribute keys and values that are available for the MetricName that are returned from get_attribute_keys and get_attribute_values tools.`,
164 | Handler: CreateDashboardHandler,
165 | },
166 | {
167 | Name: "create_alert",
168 | Description: `Create an alert with the described metrics. This tool is useful for creating an alert with the timeseries data that you are interested in. How to use this tool:
169 | NEVER GUESS the attribute keys and values that will be used for filtering or splits. Always use trace_querier or log_querier or metric_querier to understand the available attribute keys and values for the type of data/timeseries you are interested in. Ask these tools for the available attribute keys and values and metric names etc before using this tool.`,
170 | Handler: CreateAlertHandler,
171 | },
172 | {
173 | Name: "get_source_repository",
174 | Description: "Get the source repository URL/path for a specific service. This tool is useful for finding where the code for a service is stored. You need to provide the service name time range and optionally specific environments to search in.",
175 | Handler: GetSourceRepositoryHandler,
176 | },
177 | {
178 | Name: "get_service_graph",
179 | Description: "Get the service graph showing which services make calls to a given service and which services the given service makes calls to. This tool is useful for understanding service dependencies and call patterns.",
180 | Handler: GetServiceGraphHandler,
181 | },
182 | {
183 | Name: "unix_to_rfc3339",
184 | Description: "Convert a Unix timestamp (in seconds or milliseconds) to RFC3339 format. The tool automatically detects whether the timestamp is in seconds or milliseconds based on its magnitude.",
185 | Handler: UnixToRFC3339Handler,
186 | },
187 | {
188 | Name: "get_resources_by_ip",
189 | Description: "Get kubernetes resource information by IP address. This tool finds resources (like pods or services) that had a specific IP address during a given time range in a specific environment. Useful for debugging network issues or tracking pod / service history.",
190 | Handler: GetResourcesByIpHandler,
191 | },
192 | {
193 | Name: "create_investigation",
194 | Description: "Create a new investigation to document and track an issue or incident. Investigations include a title, markdown content, optional tags, and optional issue time range.",
195 | Handler: CreateInvestigationHandler,
196 | },
197 | {
198 | Name: "update_investigation",
199 | Description: "Update an existing investigation by its UUID. Allows updating the title, markdown content, time range, and other properties of an investigation.",
200 | Handler: UpdateInvestigationHandler,
201 | },
202 | {
203 | Name: "list_investigations",
204 | Description: "List investigations with optional filtering by tags and pagination. Returns a list of investigations including their title, markdown content, tags, creation/update times, and issue time ranges.",
205 | Handler: ListInvestigationsHandler,
206 | },
207 | {
208 | Name: "create_ai_issue",
209 | Description: "Create a new AI issue record with a title and markdown description. Use this to capture issues that investigations can reference via issue UUID.",
210 | Handler: CreateAIIssueHandler,
211 | },
212 | {
213 | Name: "update_ai_issue",
214 | Description: "Update an existing AI issue by UUID. Allows changing the issue title and description.",
215 | Handler: UpdateAIIssueHandler,
216 | },
217 | {
218 | Name: "get_ai_issue",
219 | Description: "Fetch a single AI issue by UUID to view its current title, description, and metadata.",
220 | Handler: GetAIIssueHandler,
221 | },
222 | {
223 | Name: "list_ai_issues",
224 | Description: "List all AI issues for the organization. Useful for discovering available issue UUIDs and their metadata.",
225 | Handler: ListAIIssuesHandler,
226 | },
227 | {
228 | Name: "list_ai_issue_events",
229 | Description: "List timeline events for a specific AI issue, including commits, releases, and investigations associated with that issue.",
230 | Handler: ListAIIssueEventsHandler,
231 | },
232 | }
233 |
```
--------------------------------------------------------------------------------
/model/model.go:
--------------------------------------------------------------------------------
```go
1 | package model
2 |
3 | import "time"
4 |
5 | // TODO: This file should be replaced if we can import the types from Metoro repo directly.
6 | // These are just duplicates at the moment. If updated in Metoro repository, it should also be updated here!
7 |
// GetLogsRequest is the request payload for fetching log lines.
type GetLogsRequest struct {
	// Required: Start time of when to get the logs in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the logs in seconds since epoch
	EndTime int64 `json:"endTime"`
	// Filters keeps only logs matching all of the given attribute values.
	// For example, {"service_name": ["microservice_a"]} returns only logs for microservice_a.
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters drops logs matching the given attribute values.
	// For example, {"service_name": ["microservice_a"]} returns logs for all services
	// except microservice_a.
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// PrevEndTime is the previous page's endTime in nanoseconds, used to get the next
	// page of logs when there are more logs than the page size.
	// If omitted (nil), the first page of logs is returned.
	PrevEndTime *int64 `json:"prevEndTime"`
	// Regexes keeps only logs that match these regexes (inclusive filter).
	Regexes []string `json:"regexes"`
	// ExcludeRegexes drops logs that match these regexes (exclusive filter).
	ExcludeRegexes []string `json:"excludeRegexes"`
	// Ascending controls the sort order of the returned logs.
	Ascending bool `json:"ascending"`
	// Environments limits the query to these clusters/environments. If empty, all clusters are included.
	Environments []string `json:"environments"`
	// ExportLimit optionally caps the number of logs to export; defaults to 100 if not specified.
	ExportLimit *int `json:"exportLimit,omitempty"`
}
32 |
// GetTracesRequest is the request payload for fetching individual traces.
type GetTracesRequest struct {
	// ServiceNames optionally restricts the traces to these services.
	ServiceNames []string `json:"serviceNames"`
	// Required: Start time in seconds since epoch.
	StartTime int64 `json:"startTime"`
	// Required: End time in seconds since epoch.
	EndTime int64 `json:"endTime"`
	// Filters keeps only traces matching all of the given attribute values,
	// e.g. {"service_name": ["microservice_a"]}.
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters drops traces matching the given attribute values.
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// PrevEndTime is the previous page's endTime, used to fetch the next page.
	// If nil, the first page is returned. (Presumably nanoseconds, as in
	// GetLogsRequest — confirm against the API handler.)
	PrevEndTime *int64 `json:"prevEndTime"`
	// Regexes keeps only traces matching these regexes (inclusive filter).
	Regexes []string `json:"regexes"`
	// ExcludeRegexes drops traces matching these regexes (exclusive filter).
	ExcludeRegexes []string `json:"excludeRegexes"`
	// Ascending controls the sort order of the returned traces.
	Ascending bool `json:"ascending"`
	// Environments limits the query to these environments. If empty, all environments are included.
	Environments []string `json:"environments"`
	// Limit optionally caps the number of traces to return.
	Limit *int `json:"limit,omitempty"`
}
46 |
// Aggregation names the operation applied to datapoints when building a
// timeseries (e.g. "sum", "avg", or a percentile such as "p99").
type Aggregation string

// Aggregations usable across metric timeseries.
const (
	AggregationSum   Aggregation = "sum"
	AggregationAvg   Aggregation = "avg"
	AggregationMax   Aggregation = "max"
	AggregationMin   Aggregation = "min"
	AggregationCount Aggregation = "count"
	AggregationP50   Aggregation = "p50"
	AggregationP90   Aggregation = "p90"
	AggregationP95   Aggregation = "p95"
	AggregationP99   Aggregation = "p99"
)

// Aggregations valid only for trace metrics.
const (
	AggregationRequestSize  Aggregation = "requestSize"
	AggregationResponseSize Aggregation = "responseSize"
	AggregationTotalSize    Aggregation = "totalSize"
)
65 |
// MetricFunction describes one transformation applied to a timeseries
// (one of: perSecond, valueDifference, monotonicDifference).
type MetricFunction struct {
	// The type of the function
	FunctionType FunctionType `json:"functionType" jsonschema:"required,enum=monotonicDifference,enum=valueDifference,enum=perSecond,description=The type of the function to apply to the metric. Do not guess the function type. Use the available ones: perSecond or valueDifference or monotonicDifference."`
	// The payload of the function.
	// TODO: If we have more payloads this can be an interface, but for now it is a math expression since it is the only payload.
	//FunctionPayload MathExpression `json:"functionPayload" jsonschema:"description=The payload of the customMathExpression. this is only set for customMathExpression. "`
}
73 |
// MathExpression is a formula evaluated against a metric, with the metric's
// value bound to the named variables (currently always ["a"]).
type MathExpression struct {
	// Variables lists the variable names usable in Expression.
	Variables []string `json:"variables" jsonschema:"description=The variables to use in the math expression. For now this should always be ['a'] if set"`
	// Expression is the formula to apply, e.g. "a / 60" to divide the metric by 60.
	Expression string `json:"expression" jsonschema:"description=The math expression to apply to the metric. For example if you want to divide the metric by 60 you would set the expression as a / 60"`
}
78 |
// FunctionType identifies a transformation applied to a timeseries.
// NOTE(review): the jsonschema enum on MetricFunction also lists "perSecond",
// which has no constant here — confirm whether one is declared elsewhere.
type FunctionType string

const (
	// MonotonicDifference: difference between successive values, with negative
	// results (e.g. counter resets) set to 0.
	MonotonicDifference FunctionType = "monotonicDifference"
	// ValueDifference: raw difference between successive values.
	ValueDifference FunctionType = "valueDifference"
)
85 |
86 | type GetMetricRequest struct {
87 | // MetricName is the name of the metric to get
88 | MetricName string `json:"metricName" jsonschema:"required,description=Name of the metric to get the timeseries data for. Do not guess the metricName, get the possible values from get_metric_names tool"`
89 | // Required: Start time of when to get the logs in seconds since epoch
90 | StartTime int64 `json:"startTime" jsonschema:"required,description=Start time of when to get the metrics in seconds since epoch"`
91 | // Required: End time of when to get the logs in seconds since epoch
92 | EndTime int64 `json:"endTime" jsonschema:"required,description=Start time of when to get the metrics in seconds since epoch"`
93 | // The filters to apply to the logs, so for example, if you want to get logs for a specific service
94 | // you can pass in a filter like {"service_name": ["microservice_a"]}
95 | Filters map[string][]string `json:"filters"`
96 | // The filters to exclude from the logs, so for example, if you want to exclude logs for a specific service
97 | // you can pass in a filter like {"service_name": ["microservice_a"]}
98 | ExcludeFilters map[string][]string `json:"excludeFilters"`
99 | // Splits is a list of attributes to split the metrics by, for example, if you want to split the metrics by service
100 | // you can pass in a list like ["service_name"]
101 | Splits []string `json:"splits"`
102 | // Aggregation is the operation to apply to the metrics, for example, if you want to sum the metrics you can pass in "sum"
103 | Aggregation Aggregation `json:"aggregation"`
104 | // IsRate is a flag to indicate if the metric is a rate metric
105 | IsRate bool `json:"isRate"`
106 | // Functions is the list of functions to apply to the metric, in the same order that they appear in this array!!
107 | Functions []MetricFunction `json:"functions"`
108 | // LimitResults is a flag to indicate if the results should be limited.
109 | LimitResults bool `json:"limitResults"`
110 | // BucketSize is the size of each datapoint bucket in seconds
111 | BucketSize int64 `json:"bucketSize"`
112 | }
113 |
// MetricAttributesRequest is the request payload for querying the attributes
// of a named metric within a time range.
type MetricAttributesRequest struct {
	// Start time in seconds since epoch.
	StartTime int64 `json:"startTime"`
	// End time in seconds since epoch.
	EndTime int64 `json:"endTime"`
	// MetricName is the metric whose attributes are requested.
	MetricName string `json:"metricName"`
	// FilterAttributes narrows the query by attribute values,
	// presumably in the same {key: [values]} form as Filters elsewhere — confirm.
	FilterAttributes map[string][]string `json:"filterAttributes"`
}
120 |
// FuzzyMetricsRequest is the request payload for searching metric names by a
// fuzzy match within a time range and set of environments.
type FuzzyMetricsRequest struct {
	// MetricFuzzyMatch is the (partial) metric name to match against.
	MetricFuzzyMatch string `json:"metricFuzzyMatch"`
	// Environments limits the search to these environments.
	Environments []string `json:"environments"`
	// Start time in seconds since epoch.
	StartTime int64 `json:"startTime"`
	// End time in seconds since epoch.
	EndTime int64 `json:"endTime"`
}
127 |
// GetProfileRequest is the request payload for fetching profiling data for a service.
type GetProfileRequest struct {
	// Required: ServiceName to get profiling for
	ServiceName string `json:"serviceName"`

	// Optional: ContainerNames to get profiling for
	ContainerNames []string `json:"containerNames"`

	// Required: Timestamp to get profiling after this time
	// Seconds since epoch
	StartTime int64 `json:"startTime"`

	// Required: Timestamp to get profiling before this time
	// Seconds since epoch
	EndTime int64 `json:"endTime"`
}
// GetTraceMetricRequest is the request payload for building timeseries data
// from traces (e.g. request counts, latencies) with filtering, splitting,
// aggregation and post-processing functions.
type GetTraceMetricRequest struct {
	// Required: Start time of when to get the trace metrics in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the trace metrics in seconds since epoch
	EndTime int64 `json:"endTime"`

	// Optional: The names of the services to get the trace metrics for.
	// Acts as an additional filter.
	ServiceNames []string `json:"serviceNames"`

	// The filters to apply to the traces, so for example, if you want to get traces for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`

	// The exclude filters to apply to the traces, so for example, if you want to exclude traces for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	ExcludeFilters map[string][]string `json:"excludeFilters"`

	// Regexes are used to filter traces based on a regex inclusively
	Regexes []string `json:"regexes"`
	// ExcludeRegexes are used to filter traces based on a regex exclusively
	ExcludeRegexes []string `json:"excludeRegexes"`

	// Splits is a list of attributes to split the metrics by, for example, if you want to split the metrics by service
	// you can pass in a list like ["service_name"]
	Splits []string `json:"splits"`

	// Functions is the array of functions to apply to the trace metrics,
	// for example, if you want to get the monotonic difference between count of traces each minute.
	// Functions are applied in the same order that they appear in this array.
	Functions []MetricFunction `json:"functions"`

	// Aggregate to apply to trace metrics, for example, if you want to sum the metrics you can pass in "sum"
	Aggregate Aggregation `json:"aggregate"`

	// Environments is a list of environments to filter the traces by. If empty, all environments will be included
	Environments []string `json:"environments"`

	// LimitResults is a flag to indicate if the results should be limited.
	LimitResults bool `json:"limitResults"`

	// BucketSize is the size of each datapoint bucket in seconds
	BucketSize int64 `json:"bucketSize"`
}
187 |
// GetSingleTraceSummaryRequest asks for the summary of a single trace
// attribute, using the same filtering options as TracesSummaryRequest.
type GetSingleTraceSummaryRequest struct {
	TracesSummaryRequest
	// Attribute is the trace attribute to get the summary for.
	Attribute string `json:"attribute"`
}
193 |
// TracesSummaryRequest is the request payload for fetching trace summaries
// within a time range, with optional filtering.
type TracesSummaryRequest struct {
	// Required: Start time of when to get the trace summaries in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the trace summaries in seconds since epoch
	EndTime int64 `json:"endTime"`

	// The filters to apply to the trace summary, so for example, if you want to get traces for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters are used to exclude traces based on a filter
	ExcludeFilters map[string][]string `json:"excludeFilters"`

	// Regexes are used to filter traces based on a regex inclusively
	Regexes []string `json:"regexes"`
	// ExcludeRegexes are used to filter traces based on a regex exclusively
	ExcludeRegexes []string `json:"excludeRegexes"`

	// Optional: The names of the services to get the trace summaries for.
	// Acts as an additional filter.
	ServiceNames []string `json:"serviceNames"`

	// Environments is the environments to get the traces for. If empty, all environments will be included
	Environments []string `json:"environments"`
}
218 |
// GetK8sEventsRequest is the request payload for fetching Kubernetes events.
type GetK8sEventsRequest struct {
	// Required: Start time of when to get the k8s events in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the k8s events in seconds since epoch
	EndTime int64 `json:"endTime"`
	// The filters to apply to the k8s events, so for example, if you want to get k8s events for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters are filters that should be excluded from the k8s events.
	// For example, if you want to get k8s events for all services except microservice_a you can pass in
	// {"service_name": ["microservice_a"]}
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// PrevEndTime is the previous page's endTime in nanoseconds, used to get the next page of k8s events
	// if there are more k8s events than the page size.
	// If omitted (nil), the first page of k8s events will be returned.
	PrevEndTime *int64 `json:"prevEndTime"`
	// Regexes are used to filter k8s events based on a regex inclusively
	Regexes []string `json:"regexes"`
	// ExcludeRegexes are used to filter k8s events based on a regex exclusively
	ExcludeRegexes []string `json:"excludeRegexes"`
	// Ascending is a flag to determine if the k8s events should be returned in ascending order
	Ascending bool `json:"ascending"`
	// Environments is the environments to get the k8s events for
	Environments []string `json:"environments"`
}
243 |
// GetSingleK8sEventSummaryRequest asks for the summary of a single k8s event
// attribute, using the same filtering options as GetK8sEventsRequest.
type GetSingleK8sEventSummaryRequest struct {
	GetK8sEventsRequest
	// Attribute is the k8s event attribute to get the summary for.
	Attribute string `json:"attribute"`
}
249 |
// GetK8sEventMetricsRequest is the request payload for building timeseries
// data from Kubernetes events.
type GetK8sEventMetricsRequest struct {
	// Required: Start time of when to get the k8s events in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the k8s events in seconds since epoch
	EndTime int64 `json:"endTime"`

	// The filters to apply to the k8s events, so for example, if you want to get k8s events for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`

	// The exclude filters to apply to the k8s events, so for example, if you want to exclude k8s events for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	ExcludeFilters map[string][]string `json:"excludeFilters"`

	// Regexes are used to filter k8s events based on a regex inclusively
	Regexes []string `json:"regexes"`
	// ExcludeRegexes are used to filter k8s events based on a regex exclusively
	ExcludeRegexes []string `json:"excludeRegexes"`

	// Splits is a list of attributes to split the metrics by, for example, if you want to split the metrics by service
	// you can pass in a list like ["service_name"]
	Splits []string `json:"splits"`

	// OnlyNumRequests is a flag to only get the number of requests, this is a much faster query.
	// NOTE(review): "requests" here presumably means the event count — confirm against the handler.
	OnlyNumRequests bool `json:"onlyNumRequests"`

	// Environments is a list of environments to filter the k8s events by. If empty, all environments will be included
	Environments []string `json:"environments"`
}
279 |
// GetPodsRequest is the request payload for fetching pod metadata updates,
// scoped either to a service or to a node.
type GetPodsRequest struct {
	// Required: Timestamp to get metadata updates after this time
	StartTime int64 `json:"startTime"`

	// Required: Timestamp to get metadata updates before this time
	EndTime int64 `json:"endTime"`

	// Optional: Environments to filter the pods by. If not provided, all environments are considered
	Environments []string `json:"environments"`

	// Optional: ServiceName to get metadata updates. One of ServiceName or NodeName is required
	ServiceName string `json:"serviceName"`

	// Optional: NodeName to get metadata updates. One of ServiceName or NodeName is required
	NodeName string `json:"nodeName"`
}
296 |
// LogSummaryRequest is the request payload for fetching log summaries within
// a time range, with optional filtering.
type LogSummaryRequest struct {
	// Required: Start time of when to get the log summaries in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the log summaries in seconds since epoch
	EndTime int64 `json:"endTime"`
	// The filters to apply to the log summary, so for example, if you want to get logs for a specific service
	// you can pass in a filter like {"service_name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`

	// ExcludeFilters drops logs matching the given attribute values.
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// Regexes keeps only logs matching these regexes (inclusive filter).
	Regexes []string `json:"regexes"`
	// ExcludeRegexes drops logs matching these regexes (exclusive filter).
	ExcludeRegexes []string `json:"excludeRegexes"`
	// The cluster/environments to get the logs for. If empty, all clusters will be included
	Environments []string `json:"environments"`
}
313 |
// GetSingleLogSummaryRequest asks for the summary of a single log attribute,
// using the same filtering options as LogSummaryRequest.
type GetSingleLogSummaryRequest struct {
	LogSummaryRequest
	// Attribute is the log attribute to get the summary for.
	Attribute string `json:"attribute"`
}
319 |
// GetAllNodesRequest is the request payload for listing Kubernetes nodes.
type GetAllNodesRequest struct {
	// StartTime Required: Start time of when to get the nodes in seconds since epoch
	StartTime int64 `json:"startTime"`
	// EndTime Required: End time of when to get the nodes in seconds since epoch
	EndTime int64 `json:"endTime"`
	// Environments The cluster/environments to get the nodes for. If empty, all clusters will be included
	Environments []string `json:"environments"`
	// Filters The filters to apply to the nodes, so for example, if you want to get the subset of nodes that have a specific label
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters are filters that should be excluded from the nodes
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// Splits is a list of attributes to split the nodes by, for example, if you want to split the nodes by a label
	Splits []string `json:"splits"`
}
334 |
// GetServiceSummariesRequest is the request payload for fetching service summaries.
type GetServiceSummariesRequest struct {
	// Required: Start time of when to get the service summaries in seconds
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the service summaries in seconds
	EndTime int64 `json:"endTime"`
	// If empty, all services across all environments will be returned
	Environments []string `json:"environments"`
	// Namespace of the services to get summaries for. An empty value returns
	// services from all namespaces.
	Namespace string `json:"namespace"`
}
345 |
// Dashboarding structs

// SetDashboardRequest is the request payload for creating or updating a dashboard.
type SetDashboardRequest struct {
	// Name is the display name of the dashboard.
	Name string `json:"name"`
	// Id identifies the dashboard to create or update.
	Id string `json:"id"`
	// DashboardJson is the serialized dashboard definition.
	DashboardJson string `json:"dashboardJson"`
	// DefaultTimeRange is the time range shown when the dashboard is opened.
	DefaultTimeRange string `json:"defaultTimeRange"`
}
353 |
// WidgetType is an enum representing different types of widgets.
type WidgetType string

const MetricChartWidgetType WidgetType = "MetricChart" // a chart of metric/trace data
const GroupWidgetType WidgetType = "Group"             // a container of child widgets
const MarkdownWidgetType WidgetType = "Markdown"       // free-form markdown content
362 |
// WidgetPosition represents the position of a widget relative to its parent.
type WidgetPosition struct {
	// X is the column offset of the widget; nil lets the layout decide.
	X *int `json:"x,omitempty"`
	// Y is the row offset of the widget; nil lets the layout decide.
	Y *int `json:"y,omitempty"`
	// W is the width in grid columns (dashboard is 12 columns wide).
	W *int `json:"w,omitempty" jsonschema:"required,description=The width of the widget. The dashboard is divided into 12 columns.For example a sensible value for a graph would be 6"`
	// H is the height in rows (each row is 128px).
	H *int `json:"h,omitempty" jsonschema:"required,description=The height of the widget. Each row is 128px. A sensible value for a graph would be 3."`
}
370 |
// Widget is the base type embedded by all widget kinds; it carries the
// discriminator and layout position.
type Widget struct {
	// WidgetType discriminates the concrete widget kind.
	WidgetType WidgetType `json:"widgetType" jsonschema:"required,description=The type of the widget. This can be MetricChart / Group / Markdown"`
	// Position places the widget within its parent; nil lets the layout decide.
	Position *WidgetPosition `json:"position,omitempty" jsonschema:"description=The position of the widget in the dashboard"`
}
376 |
// GroupWidget represents a group of widgets.
type GroupWidget struct {
	Widget `json:",inline"`
	// Title is the optional title of the group widget.
	Title *string `json:"title,omitempty" jsonschema:"description=The title of the group widget if present"`
	// Children are the widgets contained in this group; only MetricChartWidgets are supported.
	Children []MetricChartWidget `json:"children" jsonschema:"description=The children widgets of the group widget. The children are MetricChartWidgets."`
	//Variables []Variable `json:"variables,omitempty"`
}
384 |
385 | // MetricChartWidget represents a metric chart widget
386 | type MetricChartWidget struct {
387 | Widget `json:",inline"`
388 | MetricName string `json:"metricName" jsonschema:"description=The name of the metric to use in the chart if MetricType is metric. If MetricType is trace, this is not used and can be empty. This value is same as the metricName in the get_metric tool and the possible metricNames can be found in the get_metric_names tool"`
389 | Filters map[string][]string `json:"filters,omitempty" jsonschema:"description=The filters to apply to the metric. This is the same as the filters in the get_metric or get_trace_metric tool depending on the MetricType"`
390 | ExcludeFilters map[string][]string `json:"excludeFilters,omitempty" jsonschema:"description=The exclude filters to apply to the metric. This is the same as the exclude filters in the get_metric or get_trace_metric tool depending on the MetricType"`
391 | Splits []string `json:"splits,omitempty" jsonshcema:"description=Splits will allow you to group/split metrics by an attribute. This is useful if you would like to see the breakdown of a particular metric by an attribute. For example if you want to see the breakdown of the metric by X you would set the splits as ['X']"`
392 | Aggregation string `json:"aggregation" jsonschema:"description=The aggregation to apply to the metrics. This is the same as the aggregation in the get_metric or get_trace_metric tool depending on the MetricType"`
393 | Title *string `json:"title,omitempty" jsonschema:"description=The title of the metric chart widget if present"`
394 | Type ChartType `json:"type" jsonschema:"description=The type of the chart to display. Possible values are line / bar."`
395 | MetricType MetricType `json:"metricType" jsonschema:"description=The type of the metric to use in the chart. Possible values are metric / trace. If metric, the metricName should be used."`
396 | Functions []MetricFunction `json:"functions" jsonschema:"description=The functions to apply to the metric. This is the same as the functions in the get_metric or get_trace_metric tool depending on the MetricType"`
397 | }
398 |
// MarkdownWidget represents a markdown content widget.
type MarkdownWidget struct {
	Widget `json:",inline"`
	// Content is the markdown text to render.
	Content string `json:"content"`
}
// ChartType selects how a metric chart is rendered.
type ChartType string

const ChartTypeLine ChartType = "line" // render as a line chart
const ChartTypeBar ChartType = "bar"   // render as a bar chart
410 |
// MetricType identifies which datasource a timeseries is built from.
type MetricType string

const (
	// Metric is a metric timeseries type (name kept as-is for compatibility).
	Metric MetricType = "metric"
	// Trace is a trace timeseries type.
	Trace MetricType = "trace"
	// Logs is a log timeseries type.
	Logs MetricType = "logs"
	// KubernetesResource is a kubernetes resource timeseries type.
	KubernetesResource MetricType = "kubernetes_resource"
)
421 |
// GetLogMetricRequest builds timeseries data from logs; it reuses the log
// query options of GetLogsRequest and adds splitting/functions/bucketing.
type GetLogMetricRequest struct {
	GetLogsRequest
	// Splits groups the log metric by the given attributes, e.g. ["service.name"].
	Splits []string `json:"splits" jsonschema:"description=Splits will allow you to group/split metrics by an attribute. This is useful if you would like to see the breakdown of a particular metric by an attribute. For example if you want to see the breakdown of the metric by service.name you would set the splits as ['service.name']"`
	// Functions are applied to the log metric in order.
	Functions []MetricFunction `json:"functions" jsonschema:"description=The functions to apply to the log metric. Available functions are monotonicDifference which will calculate the difference between the current and previous value of the metric (negative values will be set to 0) and valueDifference which will calculate the difference between the current and previous value of the metric or MathExpression e.g. a / 60"`
	// BucketSize is the size of each datapoint bucket in seconds; 0 lets the server choose.
	BucketSize int64 `json:"bucketSize" jsonschema:"description=The size of each datapoint bucket in seconds if not provided metoro will select the best bucket size for the given duration for performance and clarity"`
}
428 |
// GetKubernetesResourceRequest is the request payload for building timeseries
// data from Kubernetes resources.
type GetKubernetesResourceRequest struct {
	// Required: Start time of when to get the kubernetes resources in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the kubernetes resources in seconds since epoch
	EndTime int64 `json:"endTime"`
	// The filters to apply to the kubernetes summary, so for example, if you want to get kubernetes resources for a specific service
	// you can pass in a filter like {"service.name": ["microservice_a"]}
	Filters map[string][]string `json:"filters"`
	// ExcludeFilters are filters that should be excluded from the kubernetes summary.
	// For example, if you want to get kubernetes resources for all services except microservice_a you can pass in
	// {"service_name": ["microservice_a"]}
	ExcludeFilters map[string][]string `json:"excludeFilters"`
	// Splits is a list of attributes to split the metrics by, for example, if you want to split the metrics by service
	// you can pass in a list like ["service_name"]
	Splits []string `json:"splits"`
	// The cluster/environments to get the kubernetes metrics for. If empty, all clusters will be included
	Environments []string `json:"environments"`
	// Functions is the list of functions to apply to the metric, in the same order that they appear in this array!!
	Functions []MetricFunction `json:"functions"`
	// LimitResults is a flag to indicate if the results should be limited.
	LimitResults bool `json:"limitResults"`
	// BucketSize is the size of each datapoint bucket in seconds
	BucketSize int64 `json:"bucketSize"`
	// Aggregation is the operation to apply to the metrics, for example, if you want to sum the metrics you can pass in "sum"
	Aggregation Aggregation `json:"aggregation"`
	// JsonPath is a path to pull the json value from the metric
	JsonPath *string `json:"jsonPath"`
}
457 |
// GetMultiMetricRequest fetches several timeseries (metrics, traces, logs,
// kubernetes resources) in one call and optionally combines them with formulas.
type GetMultiMetricRequest struct {
	// Required: Start time of when to get the timeseries in seconds
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the timeseries in seconds
	EndTime int64 `json:"endTime"`
	// Metrics is the list of individual timeseries requests to evaluate.
	Metrics []SingleMetricRequest `json:"metrics" jsonschema:"required,description=Array of metrics to get the timeseries data for"`
	// Formulas combines the requested timeseries by their formula identifiers.
	Formulas []Formula `json:"formulas" jsonschema:"description=Optional formulas to combine metrics/log metrics/trace metrics. Formula should only consist of formulaIdentifier of the metrics/logs/traces in the metrics array"`
}
466 |
// SingleMetricRequest is one entry of GetMultiMetricRequest.Metrics: a tagged
// union where Type selects which of the request payloads is set.
type SingleMetricRequest struct {
	Type string `json:"type" jsonschema:"required,enum=metric,enum=trace,enum=logs,enum=kubernetes_resource,description=Type of metric to retrieve"`
	// Metric is set when Type is "metric".
	Metric *GetMetricRequest `json:"metric,omitempty" jsonschema:"description=Metric request details when type is 'metric'"`
	// Trace is set when Type is "trace".
	Trace *GetTraceMetricRequest `json:"trace,omitempty" jsonschema:"description=Trace metric request details when type is 'trace'"`
	// Logs is set when Type is "logs".
	Logs *GetLogMetricRequest `json:"logs,omitempty" jsonschema:"description=Log metric request details when type is 'logs'"`
	// KubernetesResource is set when Type is "kubernetes_resource".
	// NOTE(review): the json key is "kubernetes" while the Type enum value is
	// "kubernetes_resource" — confirm this asymmetry is intentional before relying on it.
	KubernetesResource *GetKubernetesResourceRequest `json:"kubernetes,omitempty" jsonschema:"description=Kubernetes resource request details when type is 'kubernetes_resource'"`
	// ShouldNotReturn suppresses this result from the response (useful for formulas).
	ShouldNotReturn bool `json:"shouldNotReturn" jsonschema:"description=If true result won't be returned (useful for formulas)"`
	// FormulaIdentifier is the name used to reference this metric in formulas.
	FormulaIdentifier string `json:"formulaIdentifier" jsonschema:"description=Identifier to reference this metric in formulas"`
}
476 |
477 | // TODO: Add kubernetes resource request type attributes.
478 | type SingleTimeseriesRequest struct {
479 | Type MetricType `json:"type" jsonschema:"required,enum=metric,enum=trace,enum=logs,enum=kubernetes_resource,description=Type of timeseries data to retrieve. YOU MUST SET THIS TO ONE OF THE AVAILABLE TYPES."`
480 | MetricName string `json:"metricName" jsonschema:"description=THIS IS ONLY REQUIRED IF THE type is 'metric'.The name of the metric to use for getting the timeseries data for type 'metric'. If metric name ends with _total metoro already accounts for rate differences when returning the value so you don't need to calculate the rate yourself."`
481 | Aggregation Aggregation `json:"aggregation" jsonschema:"required,enum=sum,enum=count,enum=min,enum=max,enum=avg,enum=p50,enum=p90,enum=p95,enum=p99,description=The aggregation to apply to the timeseries at the datapoint bucket size level. The aggregation will be applied to every datapoint bucket. For example if the bucket size is 1 minute and the aggregation is sum then the sum of all datapoints in a minute will be returned. Do not guess the aggregations. Use the available ones. For traces you can use count p50 p90 p95 p99. for logs its always count. For metrics you can use sum min max avg"`
482 | JsonPath *string `json:"jsonPath" jsonschema:"description=THIS IS ONLY BE SET IF THE type is 'kubernetes_resource' and the aggregate is not count. The json path to use to get the value from the kubernetes resource to plot. for example if this was spec.replicas then the value we return would be aggregate(spec.replicas)"`
483 | Filters map[string][]string `json:"filters" jsonschema:"description=Filters to apply to the timeseries. Only the timeseries that match these filters will be returned. You MUST call get_attribute_keys and get_attribute_values tools to get the valid filter keys and values. e.g. {service_name: [/k8s/namespaceX/serviceX]} should return timeseries for serviceX in namespaceX. This is just and example. Do not guess the attribute keys and values."`
484 | ExcludeFilters map[string][]string `json:"excludeFilters" jsonschema:"description=Filters to exclude the timeseries data. Timeseries matching the exclude filters will not be returned. You MUST call get_attribute_keys and get_attribute_values tools to get the valid filter keys and values. e.g. {service_name: [/k8s/namespaceX/serviceX]} should exclude timeseries from serviceX in namespaceX. This is just and example. Do not guess the attribute keys and values"`
485 | Splits []string `json:"splits" jsonschema:"description=Array of attribute keys to split/group by the timeseries data by. Splits will allow you to group timeseries data by an attribute. This is useful if you would like to see the breakdown of a particular timeseries by an attribute. Get the attributes that you can pass into as Splits from the get_attribute_keys tool. DO NOT GUESS THE ATTRIBUTES."`
486 | Regexes []string `json:"regexes" jsonschema:"description=This should only be set if the type is 'logs'. Regexes are evaluated against the log message/body. Only the timeseries (logs) data that match these regexes will be returned. Regexes are ANDed together. For example if you want to get log count with message that contains the words 'fish' and 'chips' you would set the regexes as ['fish' 'chips']"`
487 | ExcludeRegexes []string `json:"excludeRegexes" jsonschema:"description=This should only be set if the type is 'logs'. Exclude regexes are evaluated against the log message/body. Log timeseries data that match these regexes will not be returned. Exclude regexes are ORed together. For example if you want to get timeseries data with messages that do not contain the word 'fish' or 'chips' you would set the exclude regexes as ['fish' 'chips']"`
488 | BucketSize int64 `json:"bucketSize" jsonschema:"description=The size of each datapoint bucket in seconds if not provided metoro will select the best bucket size for the given duration for performance and clarity"`
489 | Functions []MetricFunction `json:"functions" jsonschema:"description=Array of functions to apply to the timeseries data in the order as it appears in the array. Functions will be applied to the timeseries data after the aggregation. For example if the aggregation is sum and the function is perSecond then the perSecond of the sum will be returned. Do not guess the functions. Use the available ones. For traces you can use rate. For logs you can use count. For metrics you can use rate sum min max avg. For kubernetes resources you can use rate sum min max avg"`
490 | ShouldNotReturn bool `json:"shouldNotReturn" jsonschema:"description=If true result won't be returned (useful for formulas). Only set this to true if you only want to see the combination of timeseries via defining formulas and if you dont want to see the individual timeseries data.'"`
491 | FormulaIdentifier string `json:"formulaIdentifier" jsonschema:"description=Identifier to reference this metric in formulas. These must be unique for timeseries that you are requesting the first timeseries must be 'a' the second 'b' and so on. If you are not using formulas you can leave this empty. If you are using formulas then you must set this to a unique identifier for each timeseries. For example if you have 3 timeseries and you want to use them in a formula then you would set the first timeseries to 'a' the second to 'b' and the third to 'c'. You can then use these identifiers in the formulas.'"`
492 | }
493 |
// MetricSpecifier describes one timeseries query against a single data
// source (metric, trace, logs, or kubernetes_resource). The jsonschema
// tags carry the field descriptions surfaced to MCP clients; do not edit
// them casually — they are part of the tool's wire-visible schema.
type MetricSpecifier struct {
	// Which backend data source this specifier queries.
	MetricType MetricType `json:"metricType" jsonschema:"required,enum=metric,enum=trace,enum=logs,enum=kubernetes_resource,description=Type of timeseries data to retrieve. YOU MUST SET THIS TO ONE OF THE AVAILABLE TYPES."`
	// Only meaningful when MetricType is "metric".
	MetricName string `json:"metricName" jsonschema:"description=THIS IS ONLY REQUIRED IF THE type is 'metric'.The name of the metric to use for getting the timeseries data for type 'metric'. If metric name ends with _total metoro already accounts for rate differences when returning the value so you don't need to calculate the rate yourself."`
	// Attribute filters: key -> accepted values (include semantics).
	Filters map[string][]string `json:"filters" jsonschema:"description=Filters to apply to the timeseries. Only the timeseries that match these filters will be returned. You MUST call get_attribute_keys and get_attribute_values tools to get the valid filter keys and values. Do not guess the attribute keys and values."`
	// Attribute filters with exclude semantics.
	ExcludeFilters map[string][]string `json:"excludeFilters" jsonschema:"description=Filters to exclude the timeseries data. Timeseries matching the exclude filters will not be returned. You MUST call get_attribute_keys and get_attribute_values tools to get the valid filter keys and values. Do not guess the attribute keys and values"`
	// Log-body regexes; per the schema text they are ANDed. Logs type only.
	Regexes []string `json:"regexes" jsonschema:"description=This should only be set if the type is 'logs'. Regexes are evaluated against the log message/body. Only the timeseries (logs) data that match these regexes will be returned. Regexes are ANDed together. For example if you want to get log count with message that contains the words 'fish' and 'chips' you would set the regexes as ['fish' 'chips']"`
	// Log-body exclusion regexes; per the schema text they are ORed. Logs type only.
	ExcludeRegexes []string `json:"excludeRegexes" jsonschema:"description=This should only be set if the type is 'logs'. Exclude regexes are evaluated against the log message/body. Log timeseries data that match these regexes will not be returned. Exclude regexes are ORed together. For example if you want to get timeseries data with messages that do not contain the word 'fish' or 'chips' you would set the exclude regexes as ['fish' 'chips']"`
	// Attribute keys to group the resulting timeseries by.
	Splits []string `json:"splits" jsonschema:"description=Array of attribute keys to split/group by the timeseries data by. Splits will allow you to group timeseries data by an attribute. This is useful if you would like to see the breakdown of a particular timeseries by an attribute. Get the attributes that you can pass into as Splits from the get_attribute_keys tool. DO NOT GUESS THE ATTRIBUTES."`
	// Per-bucket aggregation applied to each datapoint bucket.
	Aggregation Aggregation `json:"aggregation" jsonschema:"required,enum=sum,enum=count,enum=min,enum=max,enum=avg,enum=p50,enum=p90,enum=p95,enum=p99,description=The aggregation to apply to the timeseries at the datapoint bucket size level. The aggregation will be applied to every datapoint bucket. For example if the bucket size is 1 minute and the aggregation is sum then the sum of all datapoints in a minute will be returned. Do not guess the aggregations. Use the available ones. For traces you can use count p50 p90 p95 p99. for logs its always count. For metrics you can use sum min max avg"`
	// Bucket width in seconds; zero lets the backend pick one.
	BucketSize int64 `json:"bucketSize" jsonschema:"description=The size of each datapoint bucket in seconds if not provided metoro will select the best bucket size for the given duration for performance and clarity"`
	// Post-aggregation transforms, applied in array order.
	Functions []MetricFunction `json:"functions" jsonschema:"description=Array of functions to apply to the timeseries data in the order as it appears in the array. Functions will be applied to the timeseries data after the aggregation. For example if the aggregation is sum and the function is perSecond then the perSecond of the sum will be returned. Do not guess the functions. Use the available ones. For traces you can use rate. For logs you can use count. For metrics you can use rate sum min max avg. For kubernetes resources you can use rate sum min max avg"`
	//JsonPath *string `json:"jsonPath"`
	// When true the series only feeds formulas and is omitted from output.
	ShouldNotReturn bool `json:"shouldNotReturn" jsonschema:"description=If true result won't be returned (useful for formulas). Only set this to true if you only want to see the combination of timeseries via defining formulas and if you dont want to see the individual timeseries data.'"`
	// Identifier ('a', 'b', ...) used to reference this series in Formula expressions.
	FormulaIdentifier string `json:"formulaIdentifier" jsonschema:"description=Identifier to reference this metric in formulas. These must be unique for timeseries that you are requesting the first timeseries must be 'a' the second 'b' and so on. If you are not using formulas you can leave this empty. If you are using formulas then you must set this to a unique identifier for each timeseries. For example if you have 3 timeseries and you want to use them in a formula then you would set the first timeseries to 'a' the second to 'b' and the third to 'c'. You can then use these identifiers in the formulas.'"`
}
509 |
// Formula is a math expression that combines timeseries results by their
// FormulaIdentifier values (e.g. "a / b").
type Formula struct {
	Formula string `json:"formula" jsonschema:"description=Math expression combining metric results using their formula identifiers"`
}
513 |
// GetMetricAttributesRequest scopes an attribute-key lookup to one metric
// over a time window, optionally restricted to specific environments.
type GetMetricAttributesRequest struct {
	// Required: The metric name to get the summary for
	MetricName string `json:"metricName"`
	// Required: Start time of when to get the service summaries in seconds since epoch
	StartTime int64 `json:"startTime"`
	// Required: End time of when to get the service summaries in seconds since epoch
	EndTime int64 `json:"endTime"`
	// Environments is the environments to get the traces for. If empty, all environments will be included
	Environments []string `json:"environments"`
}
524 |
// MultiMetricAttributeKeysRequest asks for the attribute keys of one data
// source. Type selects the source; only the "metric" source needs the extra
// Metric sub-request.
type MultiMetricAttributeKeysRequest struct {
	Type   string                      `json:"type"`
	Metric *GetMetricAttributesRequest `json:"metric,omitempty"`
	// Currently trace and logs and kubernetes resource do not have any request parameters
	// Only metric has request parameters
}
531 |
// GetAttributeValuesRequest asks for the observed values of one attribute
// key. Exactly one of the per-source sub-requests (Metric/Trace/Logs/
// Kubernetes) is expected to match Type — presumably validated server-side.
type GetAttributeValuesRequest struct {
	Type      MetricType `json:"type"`
	Attribute string     `json:"attribute"`
	// Optional cap on the number of values returned; nil means no explicit limit.
	Limit      *int                          `json:"limit"`
	Metric     *GetMetricAttributesRequest   `json:"metric,omitempty"`
	Trace      *TracesSummaryRequest         `json:"trace,omitempty"`
	Logs       *LogSummaryRequest            `json:"logs,omitempty"`
	Kubernetes *GetKubernetesResourceRequest `json:"kubernetes,omitempty"`
}
541 |
// GetAttributeKeysResponse lists the attribute keys found for a request.
type GetAttributeKeysResponse struct {
	// The attribute values
	Attributes []string `json:"attributes"`
}
546 |
// GetMetricNamesResponse lists known metric names. Note the JSON key is
// "metrics", not "metricNames" — part of the wire format, do not change.
type GetMetricNamesResponse struct {
	MetricNames []string `json:"metrics"`
}
550 |
// Investigation related types

// CreateInvestigationRequest is the payload for creating an investigation.
// Title, Summary and Markdown are required (gin "binding" tags); the
// remaining pointer fields are optional and omitted from JSON when nil.
type CreateInvestigationRequest struct {
	Title              string             `json:"title" binding:"required"`
	Summary            string             `json:"summary" binding:"required"`
	RecommendedActions *[]string          `json:"recommendedActions,omitempty"`
	Markdown           string             `json:"markdown" binding:"required"`
	Tags               map[string]string  `json:"tags,omitempty"`
	IssueStartTime     *time.Time         `json:"issueStartTime,omitempty"`
	IssueEndTime       *time.Time         `json:"issueEndTime,omitempty"`
	ChatHistoryUUID    *string            `json:"chatHistoryUuid,omitempty"`
	// Optional, these ideally should only set by the AI.
	IsVisible            *bool   `json:"isVisible,omitempty"`
	MetoroApprovalStatus *string `json:"metoroApprovalStatus,omitempty"`
	IssueUUID            *string `json:"issueUuid,omitempty"`
	InProgress           *bool   `json:"inProgress,omitempty"`
	AlertFireUUID        *string `json:"alertFireUuid,omitempty"`
	AlertUUID            *string `json:"alertUuid,omitempty"`
}
569 |
// UpdateInvestigationRequest is a partial (PATCH-style) update: every field
// is a pointer so that nil presumably means "leave unchanged" while a
// non-nil zero value overwrites — confirm against the server handler.
type UpdateInvestigationRequest struct {
	Title                   *string            `json:"title,omitempty"`
	Summary                 *string            `json:"summary,omitempty"`
	Markdown                *string            `json:"markdown,omitempty"`
	Tags                    *map[string]string `json:"tags,omitempty"`
	IssueStartTime          *time.Time         `json:"issueStartTime,omitempty"`
	IssueEndTime            *time.Time         `json:"issueEndTime,omitempty"`
	IssueUUID               *string            `json:"issueUuid,omitempty"`
	ParentInvestigationUUID *string            `json:"parentInvestigationUuid,omitempty"`
	IsVisible               *bool              `json:"isVisible,omitempty"`
	MetoroApprovalStatus    *string            `json:"metoroApprovalStatus,omitempty"`
	ChatHistoryUUID         *string            `json:"chatHistoryUuid,omitempty"`
	RecommendedActions      *[]string          `json:"recommendedActions,omitempty"`
	InProgress              *bool              `json:"inProgress,omitempty"`
}
585 |
// CreateAIIssueRequest is the payload for creating an AI issue.
type CreateAIIssueRequest struct {
	Title       string `json:"title"`
	Description string `json:"description"`
	Summary     string `json:"summary"`
}
591 |
// UpdateAIIssueRequest is a partial update for an AI issue; nil fields are
// omitted from the JSON body and presumably left unchanged by the server.
type UpdateAIIssueRequest struct {
	Title       *string `json:"title,omitempty"`
	Description *string `json:"description,omitempty"`
	Summary     *string `json:"summary,omitempty"`
	Open        *bool   `json:"open,omitempty"`
}
598 |
// AIIssue is a stored AI issue as returned by the API, including its
// identity, organization scope, open/closed state and timestamps.
type AIIssue struct {
	UUID             string    `json:"uuid"`
	OrganizationUUID string    `json:"organizationUuid"`
	Title            string    `json:"title"`
	Description      string    `json:"description"`
	Summary          string    `json:"summary"`
	Open             bool      `json:"open"`
	CreatedAt        time.Time `json:"createdAt"`
	UpdatedAt        time.Time `json:"updatedAt"`
}
609 |
// ListAIIssuesResponse wraps the list of AI issues returned by the API.
type ListAIIssuesResponse struct {
	Issues []AIIssue `json:"issues"`
}
613 |
// GetAIIssueResponse wraps a single AI issue returned by the API.
type GetAIIssueResponse struct {
	Issue AIIssue `json:"issue"`
}
617 |
// AIIssueEvent is an event attached to an AI issue (e.g. a deploy or an
// investigation link — the Type string discriminates; its values are not
// visible here). Optional context fields are pointers omitted when nil.
type AIIssueEvent struct {
	UUID              string     `json:"uuid"`
	IssueUUID         string     `json:"issueUuid"`
	Type              string     `json:"type"`
	CommitSHA         *string    `json:"commitSha,omitempty"`
	VCSLink           *string    `json:"vcsLink,omitempty"`
	MetoroLink        *string    `json:"metoroLink,omitempty"`
	Version           *string    `json:"version,omitempty"`
	Environment       *string    `json:"environment,omitempty"`
	Description       *string    `json:"description,omitempty"`
	InvestigationUUID *string    `json:"investigationUuid,omitempty"`
	OccurrenceTime    *time.Time `json:"occurrenceTime,omitempty"`
	CreatedAt         time.Time  `json:"createdAt"`
}
632 |
// ListAIIssueEventsResponse wraps the events of an AI issue.
type ListAIIssueEventsResponse struct {
	Events []AIIssueEvent `json:"events"`
}
636 |
// Log is a single log line with its emission time, severity, message and
// the attribute maps attached at the log and resource level.
type Log struct {
	// The time that the log line was emitted in milliseconds since the epoch
	Time int64 `json:"time"`
	// The severity of the log line
	Severity string `json:"severity"`
	// The log message
	Message string `json:"message"`
	// The attributes of the log line
	LogAttributes map[string]string `json:"logAttributes"`
	// The attributes of the resource that emitted the log line
	ResourceAttributes map[string]string `json:"resourceAttributes"`
	// Service name
	ServiceName string `json:"serviceName"`
	// Environment
	Environment string `json:"environment"`
}
653 |
// GetLogsResponse wraps the log lines matching a query.
type GetLogsResponse struct {
	// The logs that match the filters
	Logs []Log `json:"logs"`
}
658 |
// Link is a reference from one span to another span (trace id + span id)
// with optional link attributes.
type Link struct {
	// The trace id of the linked trace
	TraceId string `json:"traceId"`
	// The span id of the linked trace
	SpanId string `json:"spanId"`
	// Attributes of the link
	Attributes map[string]string `json:"attributes"`
}
// TraceEl is one span within a trace: identity (trace/span/parent ids),
// timing, status, attribute maps and service/client display names.
// Duration's unit is not stated here — DurationReadable carries the
// human-readable form (e.g. "1.2s").
type TraceEl struct {
	// The id of the trace
	TraceId string `json:"traceId"`
	// Status Code of the trace
	StatusCode string `json:"statusCode"`
	// The time that the trace was emitted in milliseconds since the epoch
	Time int64 `json:"time"`
	// The attributes of the trace
	SpanAttributes map[string]string `json:"spanAttributes"`
	// The attributes of the resource that emitted the trace
	ResourceAttributes map[string]string `json:"resourceAttributes"`
	// Service name
	ServiceName string `json:"serviceName"`
	// Display Service name
	DisplayServiceName string `json:"displayServiceName"`
	// Client name
	ClientName string `json:"clientName"`
	// Display Client name
	DisplayClientName string `json:"displayClientName"`
	// Span Id
	SpanId string `json:"spanId"`
	// Span Name
	SpanName string `json:"spanName"`
	// The duration of the trace
	Duration int64 `json:"duration"`
	// Human readable duration, e.g. "1.2s" or "500ms"
	DurationReadable string `json:"durationReadable"`
	// The parent span id
	ParentSpanId string `json:"parentSpanId"`
	// Links
	Links []Link `json:"links"`
}
699 |
// GetTracesResponse wraps the spans matching a query.
type GetTracesResponse struct {
	// The traces that match the filters
	Traces []TraceEl `json:"traces"`
}
704 |
// MetricSpecifiersRequest bundles one or more MetricSpecifier queries with
// optional formulas that combine them via their FormulaIdentifier values.
type MetricSpecifiersRequest struct {
	MetricSpecifiers []MetricSpecifier `json:"metricSpecifiers" binding:"required"`
	Formulas         []Formula         `json:"formulas"`
}
709 |
// MetricSpecifierToMetoroQLResponse returns the MetoroQL query strings
// generated from a MetricSpecifiersRequest.
type MetricSpecifierToMetoroQLResponse struct {
	Queries []string `json:"queries"`
}
713 |
```