This is page 4 of 5. Use http://codebase.md/crowdstrike/falcon-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .env.dev.example
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug.yaml
│   │   ├── config.yml
│   │   ├── feature-request.yaml
│   │   └── question.yaml
│   └── workflows
│       ├── docker-build-push.yml
│       ├── docker-build-test.yml
│       ├── markdown-lint.yml
│       ├── python-lint.yml
│       ├── python-test-e2e.yml
│       ├── python-test.yml
│       └── release.yml
├── .gitignore
├── .markdownlint.json
├── CHANGELOG.md
├── Dockerfile
├── docs
│   ├── CODE_OF_CONDUCT.md
│   ├── CONTRIBUTING.md
│   ├── deployment
│   │   ├── amazon_bedrock_agentcore.md
│   │   └── google_cloud.md
│   ├── e2e_testing.md
│   ├── module_development.md
│   ├── resource_development.md
│   └── SECURITY.md
├── examples
│   ├── adk
│   │   ├── adk_agent_operations.sh
│   │   ├── falcon_agent
│   │   │   ├── __init__.py
│   │   │   ├── agent.py
│   │   │   ├── env.properties
│   │   │   └── requirements.txt
│   │   └── README.md
│   ├── basic_usage.py
│   ├── mcp_config.json
│   ├── sse_usage.py
│   └── streamable_http_usage.py
├── falcon_mcp
│   ├── __init__.py
│   ├── client.py
│   ├── common
│   │   ├── __init__.py
│   │   ├── api_scopes.py
│   │   ├── errors.py
│   │   ├── logging.py
│   │   └── utils.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── cloud.py
│   │   ├── detections.py
│   │   ├── discover.py
│   │   ├── hosts.py
│   │   ├── idp.py
│   │   ├── incidents.py
│   │   ├── intel.py
│   │   ├── sensor_usage.py
│   │   ├── serverless.py
│   │   └── spotlight.py
│   ├── registry.py
│   ├── resources
│   │   ├── __init__.py
│   │   ├── cloud.py
│   │   ├── detections.py
│   │   ├── discover.py
│   │   ├── hosts.py
│   │   ├── incidents.py
│   │   ├── intel.py
│   │   ├── sensor_usage.py
│   │   ├── serverless.py
│   │   └── spotlight.py
│   └── server.py
├── LICENSE
├── pyproject.toml
├── README.md
├── scripts
│   ├── generate_e2e_report.py
│   └── test_results_viewer.html
├── SUPPORT.md
├── tests
│   ├── __init__.py
│   ├── common
│   │   ├── __init__.py
│   │   ├── test_api_scopes.py
│   │   ├── test_errors.py
│   │   ├── test_logging.py
│   │   └── test_utils.py
│   ├── conftest.py
│   ├── e2e
│   │   ├── __init__.py
│   │   ├── modules
│   │   │   ├── __init__.py
│   │   │   ├── test_cloud.py
│   │   │   ├── test_detections.py
│   │   │   ├── test_discover.py
│   │   │   ├── test_hosts.py
│   │   │   ├── test_idp.py
│   │   │   ├── test_incidents.py
│   │   │   ├── test_intel.py
│   │   │   ├── test_sensor_usage.py
│   │   │   ├── test_serverless.py
│   │   │   └── test_spotlight.py
│   │   └── utils
│   │       ├── __init__.py
│   │       └── base_e2e_test.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── test_base.py
│   │   ├── test_cloud.py
│   │   ├── test_detections.py
│   │   ├── test_discover.py
│   │   ├── test_hosts.py
│   │   ├── test_idp.py
│   │   ├── test_incidents.py
│   │   ├── test_intel.py
│   │   ├── test_sensor_usage.py
│   │   ├── test_serverless.py
│   │   ├── test_spotlight.py
│   │   └── utils
│   │       └── test_modules.py
│   ├── test_client.py
│   ├── test_registry.py
│   ├── test_server.py
│   └── test_streamable_http_transport.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/falcon_mcp/resources/spotlight.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Contains Spotlight Vulnerabilities resources.
3 | """
4 |
5 | from falcon_mcp.common.utils import generate_md_table
6 |
7 | # List of tuples containing filter options data: (name, type, operators, description)
8 | SEARCH_VULNERABILITIES_FQL_FILTERS = [
9 | (
10 | "Name",
11 | "Type",
12 | "Operators",
13 | "Description"
14 | ),
15 | (
16 | "aid",
17 | "String",
18 | "No",
19 | """
20 | Unique agent identifier (AID) of the sensor where the vulnerability was found.
21 | For assets without a Falcon sensor installed, this field matches the asset ID field.
22 |
23 | Ex: aid:'abcde6b9a3427d8c4a1af416424d6231'
24 | """
25 | ),
26 | (
27 | "apps.remediation.ids",
28 | "String",
29 | "Yes",
30 | """
31 | Unique identifier of a remediation. Supports multiple values and negation.
32 |
33 | Ex: apps.remediation.ids:'7bba2e543744a92962be7afeb6484858'
34 | Ex: apps.remediation.ids:['ID1','ID2','ID3']
35 | """
36 | ),
37 | (
38 | "cid",
39 | "String",
40 | "No",
41 | """
42 | Unique system-generated customer identifier (CID) of the account. In multi-CID environments, you can filter by both parent and child CIDs.
43 |
44 | Ex: cid:'0123456789ABCDEFGHIJKLMNOPQRSTUV'
45 | """
46 | ),
47 | (
48 | "closed_timestamp",
49 | "Timestamp",
50 | "Yes",
51 | """
52 | Date and time a vulnerability was set to a status of CLOSED.
53 |
54 | Ex: closed_timestamp:>'2021-06-25T10:32'
55 | Ex: closed_timestamp:<'2021-10-18'
56 | """
57 | ),
58 | (
59 | "confidence",
60 | "String",
61 | "Yes",
62 | """
63 | Whether or not the vulnerability has been confirmed.
64 | Values: confirmed, potential
65 |
66 | Ex: confidence:'potential'
67 | """
68 | ),
69 | (
70 | "created_timestamp",
71 | "Timestamp",
72 | "Yes",
73 | """
74 | Date and time when this vulnerability was found in your environment. Use this to get vulnerabilities created after the timestamp you last pulled data on.
75 |
76 | Ex: created_timestamp:<'2021-09-25T13:22'
77 | Ex: created_timestamp:>'2021-02-12'
78 | """
79 | ),
80 | (
81 | "cve.base_score",
82 | "Number",
83 | "Yes",
84 | """
85 | CVE base score.
86 |
87 | Ex: cve.base_score:>5.0
88 | """
89 | ),
90 | (
91 | "cve.cwes",
92 | "String",
93 | "Yes",
94 | """
95 | Unique identifier for a vulnerability from the Common Weakness Enumeration (CWE) list.
96 |
97 | Ex: cve.cwes:['CWE-787','CWE-699']
98 | """
99 | ),
100 | (
101 | "cve.exploit_status",
102 | "String",
103 | "Yes",
104 | """
105 | Numeric value of the most severe known exploit. Supports multiple values and negation.
106 | Values: 0=Unproven, 30=Available, 60=Easily accessible, 90=Actively used
107 |
108 | Ex: cve.exploit_status:'60'
109 | Ex: cve.exploit_status:!'0'
110 | """
111 | ),
112 | (
113 | "cve.exprt_rating",
114 | "String",
115 | "Yes",
116 | """
117 | ExPRT rating assigned by CrowdStrike's predictive AI rating system. Value must be in all caps. Supports multiple values and negation.
118 | Values: UNKNOWN, LOW, MEDIUM, HIGH, CRITICAL
119 |
120 | Ex: cve.exprt_rating:'HIGH'
121 | Ex: cve.exprt_rating:['HIGH','CRITICAL']
122 | """
123 | ),
124 | (
125 | "cve.id",
126 | "String",
127 | "Yes",
128 | """
129 | Unique identifier for a vulnerability as cataloged in the National Vulnerability Database (NVD). Supports multiple values and negation. For case-insensitive filtering, add .insensitive to the field name.
130 | Note: All values must be enclosed in brackets.
131 |
132 | Ex: cve.id:['CVE-2022-1234']
133 | Ex: cve.id:['CVE-2022-1234','CVE-2023-1234']
134 | """
135 | ),
136 | (
137 | "cve.is_cisa_kev",
138 | "Boolean",
139 | "Yes",
140 | """
141 | Filter for vulnerabilities that are in the CISA Known Exploited Vulnerabilities (KEV) catalog. Supports negation.
142 |
143 | Ex: cve.is_cisa_kev:true
144 | """
145 | ),
146 | (
147 | "cve.remediation_level",
148 | "String",
149 | "Yes",
150 | """
151 | CVSS remediation level of the vulnerability. Supports multiple values and negation.
152 |
153 | Ex: cve.remediation_level:'O' (official fix)
154 | Ex: cve.remediation_level:'U' (no available fix)
155 | """
156 | ),
157 | (
158 | "cve.severity",
159 | "String",
160 | "Yes",
161 | """
162 | CVSS severity rating of the vulnerability. Value must be in all caps. Supports multiple values and negation.
163 | Values: UNKNOWN, NONE, LOW, MEDIUM, HIGH, CRITICAL
164 |
165 | Ex: cve.severity:'LOW'
166 | Ex: cve.severity:!'UNKNOWN'
167 | """
168 | ),
169 | (
170 | "cve.types",
171 | "String",
172 | "Yes",
173 | """
174 | Vulnerability type.
175 | Values: Vulnerability, Misconfiguration, Unsupported software
176 |
177 | Ex: cve.types:!'Misconfiguration'
178 | """
179 | ),
180 | (
181 | "data_providers.ports",
182 | "String",
183 | "Yes",
184 | """
185 | Ports on the host where the vulnerability was found by the third-party provider.
186 |
187 | Ex: data_providers.ports:'53'
188 | Ex: data_providers.ports:!'0' (any port)
189 | """
190 | ),
191 | (
192 | "data_providers.provider",
193 | "String",
194 | "No",
195 | """
196 | Name of the data provider.
197 |
198 | Ex: data_providers.provider:'{provider name}'
199 | """
200 | ),
201 | (
202 | "data_providers.rating",
203 | "String",
204 | "Yes",
205 | """
206 | Third-party provider rating.
207 | Values: UNKNOWN, NONE, LOW, MEDIUM, HIGH, CRITICAL
208 |
209 | Ex: data_providers.rating:'CRITICAL'
210 | """
211 | ),
212 | (
213 | "data_providers.scan_time",
214 | "Timestamp",
215 | "Yes",
216 | """
217 | UTC date and time when the vulnerability was most recently identified by the third-party provider.
218 |
219 | Ex: data_providers.scan_time:>'2023-08-03'
220 | """
221 | ),
222 | (
223 | "data_providers.scanner_id",
224 | "String",
225 | "No",
226 | """
227 | ID of the third-party scanner that identified the vulnerability.
228 |
229 | Ex: data_providers.scanner_id:'{scanner id}'
230 | """
231 | ),
232 | (
233 | "host_info.asset_criticality",
234 | "String",
235 | "Yes",
236 | """
237 | Assigned criticality level of the asset.
238 | Values: Critical, High, Noncritical, Unassigned
239 |
240 | Ex: host_info.asset_criticality:['Critical','High']
241 | Ex: host_info.asset_criticality:!'Unassigned'
242 | """
243 | ),
244 | (
245 | "host_info.groups",
246 | "String",
247 | "Yes",
248 | """
249 | Unique system-assigned ID of a host group. Supports multiple values and negation. All values must be enclosed in brackets.
250 |
251 | Ex: host_info.groups:['03f0b54af2692e99c4cec945818fbef7']
252 | Ex: host_info.groups:!['03f0b54af2692e99c4cec945818fbef7']
253 | """
254 | ),
255 | (
256 | "host_info.has_run_container",
257 | "Boolean",
258 | "No",
259 | """
260 | Whether or not the host is running Kubernetes containers.
261 |
262 | Ex: host_info.has_run_container:true
263 | """
264 | ),
265 | (
266 | "host_info.internet_exposure",
267 | "String",
268 | "No",
269 | """
270 | Whether or not the asset is internet-facing.
271 | Values: Yes, No, Pending
272 |
273 | Ex: host_info.internet_exposure:'Yes'
274 | """
275 | ),
276 | (
277 | "host_info.managed_by",
278 | "String",
279 | "Yes",
280 | """
281 | Indicates if the asset has the Falcon sensor installed.
282 | Values: Falcon sensor, Unmanaged
283 | Supports multiple values and negation.
284 |
285 | Ex: host_info.managed_by:'Unmanaged'
286 | """
287 | ),
288 | (
289 | "host_info.platform_name",
290 | "String",
291 | "Yes",
292 | """
293 | Operating system platform. Supports negation.
294 | Values: Windows, Mac, Linux
295 |
296 | Ex: host_info.platform_name:'Windows'
297 | Ex: host_info.platform_name:!'Linux'
298 | """
299 | ),
300 | (
301 | "host_info.product_type_desc",
302 | "String",
303 | "Yes",
304 | """
305 | Type of host a sensor is running on. Supports multiple values and negation. For case-insensitive filtering, add .insensitive to the field name. Enter values with first letter capitalized.
306 | Values: Workstation, Server, Domain Controller
307 |
308 | Ex: host_info.product_type_desc:'Workstation'
309 | Ex: host_info.product_type_desc:!'Workstation'
310 | """
311 | ),
312 | (
313 | "host_info.tags",
314 | "String",
315 | "Yes",
316 | """
317 | Name of a tag assigned to a host. Supports multiple values and negation. All values must be enclosed in brackets.
318 |
319 | Ex: host_info.tags:['ephemeral']
320 | Ex: host_info.tags:!['search','ephemeral']
321 | """
322 | ),
323 | (
324 | "host_info.third_party_asset_ids",
325 | "String",
326 | "Yes",
327 | """
328 | Asset IDs assigned to the host by third-party providers in the format: {data_provider}: {data_provider_asset_id}
329 | Supports multiple values and negation.
330 |
331 | Ex: host_info.third_party_asset_ids:'{provider}: {asset_id}'
332 | """
333 | ),
334 | (
335 | "last_seen_within",
336 | "Number",
337 | "No",
338 | """
339 | Filter for vulnerabilities based on the number of days since a host last connected to Falcon. Enter a numeric value from 3 to 45 to indicate the number of days to look back.
340 |
341 | Ex: last_seen_within:'10'
342 | """
343 | ),
344 | (
345 | "services.port",
346 | "String",
347 | "No",
348 | """
349 | Port on the host where a vulnerability was found by Falcon EASM or a third-party provider.
350 |
351 | Ex: services.port:'443'
352 | """
353 | ),
354 | (
355 | "services.protocol",
356 | "String",
357 | "No",
358 | """
359 | Network protocols recognized by Falcon EASM.
360 |
361 | Ex: services.protocol:'pop3'
362 | """
363 | ),
364 | (
365 | "services.transport",
366 | "String",
367 | "No",
368 | """
369 | Transport methods recognized by Falcon EASM.
370 |
371 | Ex: services.transport:'tcp'
372 | """
373 | ),
374 | (
375 | "status",
376 | "String",
377 | "Yes",
378 | """
379 | Status of a vulnerability. Value must be in all lowercase letters. Supports multiple values and negation.
380 | Values: open, closed, reopen, expired
381 |
382 | Ex: status:'open'
383 | Ex: status:!'closed'
384 | Ex: status:['open','reopen']
385 | """
386 | ),
387 | (
388 | "suppression_info.is_suppressed",
389 | "Boolean",
390 | "No",
391 | """
392 | Indicates if the vulnerability is suppressed by a suppression rule or not.
393 |
394 | Ex: suppression_info.is_suppressed:true
395 | """
396 | ),
397 | (
398 | "suppression_info.reason",
399 | "String",
400 | "Yes",
401 | """
402 | Attribute assigned to a suppression rule. Supports multiple values and negation. All values must be enclosed in brackets.
403 | Values: ACCEPT_RISK, COMPENSATING_CONTROL, FALSE_POSITIVE
404 |
405 | Ex: suppression_info.reason:['ACCEPT_RISK']
406 | Ex: suppression_info.reason:!['FALSE_POSITIVE']
407 | """
408 | ),
409 | (
410 | "updated_timestamp",
411 | "Timestamp",
412 | "Yes",
413 | """
414 | UTC date and time of the last update made on a vulnerability.
415 |
416 | Ex: updated_timestamp:<'2021-10-20T22:36'
417 | Ex: updated_timestamp:>'2021-09-15'
418 | """
419 | ),
420 | (
421 | "vulnerability_id",
422 | "String",
423 | "Yes",
424 | """
425 | CVE ID of the vulnerability. If there's no CVE ID, this is the CrowdStrike or third-party ID of the vulnerability.
426 | For case-insensitive filtering, add .insensitive to the field name. Supports multiple values and negation.
427 |
428 | Ex: vulnerability_id:['CVE-2022-1234']
429 | Ex: vulnerability_id:['CVE-2022-1234','CVE-2023-4321']
430 | """
431 | ),
432 | ]
433 |
434 | SEARCH_VULNERABILITIES_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Search Vulnerabilities Guide
435 |
436 | === BASIC SYNTAX ===
437 | property_name:[operator]'value'
438 |
439 | === AVAILABLE OPERATORS ===
440 | • No operator = equals (default)
441 | • ! = not equal to
442 | • > = greater than
443 | • >= = greater than or equal
444 | • < = less than
445 | • <= = less than or equal
446 | • ~ = text match (ignores case, spaces, punctuation)
447 | • !~ = does not text match
448 |
449 | === DATA TYPES & SYNTAX ===
450 | • Strings: 'value' or ['exact_value'] for exact match
451 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
452 | • Booleans: true or false (no quotes)
453 | • Numbers: 123 (no quotes)
454 |
455 | === COMBINING CONDITIONS ===
456 | • + = AND condition
457 | • , = OR condition
458 | • ( ) = Group expressions
459 |
460 | === falcon_search_vulnerabilities FQL filter options ===
461 |
462 | """ + generate_md_table(SEARCH_VULNERABILITIES_FQL_FILTERS) + """
463 |
464 | === IMPORTANT NOTES ===
465 | • Use single quotes around string values: 'value'
466 | • Use square brackets for exact matches and multiple values: ['value1','value2']
467 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
468 | • For case-insensitive filtering, add .insensitive to field names
469 | • Boolean values: true or false (no quotes)
470 | • Wildcards (*) are unsupported in this API
471 | • Some fields require specific capitalization (check individual field descriptions)
472 |
473 | === COMMON FILTER EXAMPLES ===
474 | • High severity vulnerabilities: cve.severity:'HIGH'
475 | • Recent vulnerabilities: created_timestamp:>'2024-01-01'
476 | • Windows vulnerabilities: host_info.platform_name:'Windows'
477 | • Open vulnerabilities with exploits: status:'open'+cve.exploit_status:!'0'
478 | • Critical ExPRT rated vulnerabilities: cve.exprt_rating:'CRITICAL'
479 | • CISA KEV vulnerabilities: cve.is_cisa_kev:true
480 | """
481 |
```
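The FQL guide above documents how individual `property:'value'` conditions combine with `+` (AND) and `,` (OR). As an illustration only (the `build_fql_filter` helper below is hypothetical and not part of `falcon_mcp`), composing such a filter string might look like this:

```python
# Hypothetical sketch: composing an FQL filter string following the syntax
# documented in SEARCH_VULNERABILITIES_FQL_DOCUMENTATION above.
# '+' joins conditions with AND; ',' joins them with OR; parentheses group.

def build_fql_filter(and_conditions: list[str], or_group: list[str] | None = None) -> str:
    """Join conditions with '+' (AND) and optionally OR a second grouped set."""
    filter_str = "+".join(and_conditions)
    if or_group:
        filter_str = f"({filter_str}),({'+'.join(or_group)})"
    return filter_str


# Open vulnerabilities with a known exploit on Windows hosts:
print(build_fql_filter([
    "status:'open'",
    "cve.exploit_status:!'0'",
    "host_info.platform_name:'Windows'",
]))
# -> status:'open'+cve.exploit_status:!'0'+host_info.platform_name:'Windows'
```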
--------------------------------------------------------------------------------
/tests/modules/test_hosts.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Tests for the Hosts module.
3 | """
4 |
5 | import unittest
6 |
7 | from falcon_mcp.modules.hosts import HostsModule
8 | from tests.modules.utils.test_modules import TestModules
9 |
10 |
11 | class TestHostsModule(TestModules):
12 | """Test cases for the Hosts module."""
13 |
14 | def setUp(self):
15 | """Set up test fixtures."""
16 | self.setup_module(HostsModule)
17 |
18 | def test_register_tools(self):
19 | """Test registering tools with the server."""
20 | expected_tools = [
21 | "falcon_search_hosts",
22 | "falcon_get_host_details",
23 | ]
24 | self.assert_tools_registered(expected_tools)
25 |
26 | def test_register_resources(self):
27 | """Test registering resources with the server."""
28 | expected_resources = [
29 | "falcon_search_hosts_fql_guide",
30 | ]
31 | self.assert_resources_registered(expected_resources)
32 |
33 | def test_search_hosts(self):
34 | """Test searching for hosts."""
35 | # Setup mock responses for both API calls
36 | query_response = {
37 | "status_code": 200,
38 | "body": {"resources": ["device1", "device2"]},
39 | }
40 | details_response = {
41 | "status_code": 200,
42 | "body": {"resources": []}, # Empty resources for PostDeviceDetailsV2
43 | }
44 | self.mock_client.command.side_effect = [query_response, details_response]
45 |
46 | # Call search_hosts
47 | result = self.module.search_hosts(filter="platform_name:'Windows'", limit=50)
48 |
49 | # Verify client commands were called correctly
50 | self.assertEqual(self.mock_client.command.call_count, 2)
51 |
52 | # Check that the first call was to QueryDevicesByFilter with the right filter and limit
53 | first_call = self.mock_client.command.call_args_list[0]
54 | self.assertEqual(first_call[0][0], "QueryDevicesByFilter")
55 | self.assertEqual(
56 | first_call[1]["parameters"]["filter"], "platform_name:'Windows'"
57 | )
58 | self.assertEqual(first_call[1]["parameters"]["limit"], 50)
59 | self.mock_client.command.assert_any_call(
60 | "PostDeviceDetailsV2", body={"ids": ["device1", "device2"]}
61 | )
62 |
63 | # Verify result
64 | self.assertEqual(
65 | result, []
66 | ) # Empty list because PostDeviceDetailsV2 returned empty resources
67 |
68 | def test_search_hosts_with_details(self):
69 | """Test searching for hosts with details."""
70 | # Setup mock responses
71 | query_response = {
72 | "status_code": 200,
73 | "body": {"resources": ["device1", "device2"]},
74 | }
75 | details_response = {
76 | "status_code": 200,
77 | "body": {
78 | "resources": [
79 | {
80 | "device_id": "device1",
81 | "hostname": "TEST-HOST-1",
82 | "platform_name": "Windows",
83 | },
84 | {
85 | "device_id": "device2",
86 | "hostname": "TEST-HOST-2",
87 | "platform_name": "Linux",
88 | },
89 | ]
90 | },
91 | }
92 | self.mock_client.command.side_effect = [query_response, details_response]
93 |
94 | # Call search_hosts
95 | result = self.module.search_hosts(filter="platform_name:'Windows'", limit=50)
96 |
97 | # Verify client commands were called correctly
98 | self.assertEqual(self.mock_client.command.call_count, 2)
99 |
100 | # Check that the first call was to QueryDevicesByFilter with the right filter and limit
101 | first_call = self.mock_client.command.call_args_list[0]
102 | self.assertEqual(first_call[0][0], "QueryDevicesByFilter")
103 | self.assertEqual(
104 | first_call[1]["parameters"]["filter"], "platform_name:'Windows'"
105 | )
106 | self.assertEqual(first_call[1]["parameters"]["limit"], 50)
107 | self.mock_client.command.assert_any_call(
108 | "PostDeviceDetailsV2", body={"ids": ["device1", "device2"]}
109 | )
110 |
111 | # Verify result
112 | expected_result = [
113 | {
114 | "device_id": "device1",
115 | "hostname": "TEST-HOST-1",
116 | "platform_name": "Windows",
117 | },
118 | {
119 | "device_id": "device2",
120 | "hostname": "TEST-HOST-2",
121 | "platform_name": "Linux",
122 | },
123 | ]
124 | self.assertEqual(result, expected_result)
125 |
126 | def test_search_hosts_error(self):
127 | """Test searching for hosts with API error."""
128 | # Setup mock response with error
129 | mock_response = {
130 | "status_code": 400,
131 | "body": {"errors": [{"message": "Invalid filter"}]},
132 | }
133 | self.mock_client.command.return_value = mock_response
134 |
135 | # Call search_hosts
136 | result = self.module.search_hosts(filter="invalid_filter")
137 |
138 | # Verify result contains error
139 | self.assertEqual(len(result), 1)
140 | self.assertIn("error", result[0])
141 | self.assertIn("details", result[0])
142 |
143 | def test_search_hosts_no_results(self):
144 | """Test searching for hosts with no results."""
145 | # Setup mock response with empty resources
146 | mock_response = {"status_code": 200, "body": {"resources": []}}
147 | self.mock_client.command.return_value = mock_response
148 |
149 | # Call search_hosts
150 | result = self.module.search_hosts(filter="hostname:'NONEXISTENT'")
151 |
152 | # Verify result is empty list
153 | self.assertEqual(result, [])
154 | # Only one API call should be made (QueryDevicesByFilter)
155 | self.assertEqual(self.mock_client.command.call_count, 1)
156 |
157 | def test_search_hosts_with_all_parameters(self):
158 | """Test searching for hosts with all parameters."""
159 | # Setup mock response with empty resources
160 | mock_response = {"status_code": 200, "body": {"resources": []}}
161 | self.mock_client.command.return_value = mock_response
162 |
163 | # Call search_hosts with all parameters
164 | result = self.module.search_hosts(
165 | filter="platform_name:'Linux'", limit=25, offset=10, sort="hostname.desc"
166 | )
167 |
168 | # Verify API call with all parameters
169 | self.mock_client.command.assert_called_once_with(
170 | "QueryDevicesByFilter",
171 | parameters={
172 | "filter": "platform_name:'Linux'",
173 | "limit": 25,
174 | "offset": 10,
175 | "sort": "hostname.desc",
176 | },
177 | )
178 |
179 | # Verify result
180 | self.assertEqual(result, [])
181 |
182 | def test_get_host_details(self):
183 | """Test getting host details."""
184 | # Setup mock response
185 | mock_response = {
186 | "status_code": 200,
187 | "body": {
188 | "resources": [
189 | {
190 | "device_id": "device1",
191 | "hostname": "TEST-HOST-1",
192 | "platform_name": "Windows",
193 | }
194 | ]
195 | },
196 | }
197 | self.mock_client.command.return_value = mock_response
198 |
199 | # Call get_host_details
200 | result = self.module.get_host_details(["device1"])
201 |
202 | # Verify client command was called correctly
203 | self.mock_client.command.assert_called_once_with(
204 | "PostDeviceDetailsV2", body={"ids": ["device1"]}
205 | )
206 |
207 | # Verify result
208 | expected_result = [
209 | {
210 | "device_id": "device1",
211 | "hostname": "TEST-HOST-1",
212 | "platform_name": "Windows",
213 | }
214 | ]
215 | self.assertEqual(result, expected_result)
216 |
217 | def test_get_host_details_multiple_ids(self):
218 | """Test getting host details for multiple IDs."""
219 | # Setup mock response
220 | mock_response = {
221 | "status_code": 200,
222 | "body": {
223 | "resources": [
224 | {
225 | "device_id": "device1",
226 | "hostname": "TEST-HOST-1",
227 | "platform_name": "Windows",
228 | },
229 | {
230 | "device_id": "device2",
231 | "hostname": "TEST-HOST-2",
232 | "platform_name": "Linux",
233 | },
234 | ]
235 | },
236 | }
237 | self.mock_client.command.return_value = mock_response
238 |
239 | # Call get_host_details
240 | result = self.module.get_host_details(["device1", "device2"])
241 |
242 | # Verify client command was called correctly
243 | self.mock_client.command.assert_called_once_with(
244 | "PostDeviceDetailsV2", body={"ids": ["device1", "device2"]}
245 | )
246 |
247 | # Verify result
248 | expected_result = [
249 | {
250 | "device_id": "device1",
251 | "hostname": "TEST-HOST-1",
252 | "platform_name": "Windows",
253 | },
254 | {
255 | "device_id": "device2",
256 | "hostname": "TEST-HOST-2",
257 | "platform_name": "Linux",
258 | },
259 | ]
260 | self.assertEqual(result, expected_result)
261 |
262 | def test_get_host_details_not_found(self):
263 | """Test getting host details for non-existent host."""
264 | # Setup mock response with empty resources
265 | mock_response = {"status_code": 200, "body": {"resources": []}}
266 | self.mock_client.command.return_value = mock_response
267 |
268 | # Call get_host_details
269 | result = self.module.get_host_details(["nonexistent"])
270 |
271 | # For empty resources, handle_api_response returns the default_result (empty list)
272 | self.assertEqual(result, [])
273 |
274 | def test_get_host_details_error(self):
275 | """Test getting host details with API error."""
276 | # Setup mock response with error
277 | mock_response = {
278 | "status_code": 404,
279 | "body": {"errors": [{"message": "Device not found"}]},
280 | }
281 | self.mock_client.command.return_value = mock_response
282 |
283 | # Call get_host_details
284 | result = self.module.get_host_details(["invalid-id"])
285 |
286 | # Verify result contains error
287 | self.assertIsInstance(result, dict)
288 | self.assertIn("error", result)
289 | self.assertIn("details", result)
290 |
291 | def test_get_host_details_empty_list(self):
292 | """Test getting host details with empty ID list."""
293 | # Call get_host_details with empty list
294 | result = self.module.get_host_details([])
295 |
296 | # Should return empty list without making API call
297 | self.assertEqual(result, [])
298 | self.mock_client.command.assert_not_called()
299 |
300 | def test_search_hosts_windows_platform(self):
301 | """Test searching for Windows hosts."""
302 | # Setup mock responses
303 | query_response = {
304 | "status_code": 200,
305 | "body": {"resources": ["win-host-1", "win-host-2"]},
306 | }
307 | details_response = {
308 | "status_code": 200,
309 | "body": {
310 | "resources": [
311 | {
312 | "device_id": "win-host-1",
313 | "platform_name": "Windows",
314 | "hostname": "WIN-01",
315 | },
316 | {
317 | "device_id": "win-host-2",
318 | "platform_name": "Windows",
319 | "hostname": "WIN-02",
320 | },
321 | ]
322 | },
323 | }
324 | self.mock_client.command.side_effect = [query_response, details_response]
325 |
326 | # Call search_hosts
327 | result = self.module.search_hosts(filter="platform_name:'Windows'")
328 |
329 | # Verify result
330 | self.assertEqual(len(result), 2)
331 | self.assertEqual(result[0]["platform_name"], "Windows")
332 | self.assertEqual(result[1]["platform_name"], "Windows")
333 |
334 | # Verify filter was applied correctly
335 | first_call = self.mock_client.command.call_args_list[0]
336 | self.assertEqual(
337 | first_call[1]["parameters"]["filter"], "platform_name:'Windows'"
338 | )
339 |
340 | def test_search_hosts_linux_platform(self):
341 | """Test searching for Linux hosts."""
342 | # Setup mock responses
343 | query_response = {"status_code": 200, "body": {"resources": ["linux-host-1"]}}
344 | details_response = {
345 | "status_code": 200,
346 | "body": {
347 | "resources": [
348 | {
349 | "device_id": "linux-host-1",
350 | "platform_name": "Linux",
351 | "hostname": "LINUX-01",
352 | }
353 | ]
354 | },
355 | }
356 | self.mock_client.command.side_effect = [query_response, details_response]
357 |
358 | # Call search_hosts
359 | result = self.module.search_hosts(filter="platform_name:'Linux'")
360 |
361 | # Verify result
362 | self.assertEqual(len(result), 1)
363 | self.assertEqual(result[0]["platform_name"], "Linux")
364 |
365 | # Verify filter was applied correctly
366 | first_call = self.mock_client.command.call_args_list[0]
367 | self.assertEqual(first_call[1]["parameters"]["filter"], "platform_name:'Linux'")
368 |
369 | def test_search_hosts_mac_platform_no_results(self):
370 | """Test searching for Mac hosts with no results."""
371 | # Setup mock response with empty resources
372 | mock_response = {"status_code": 200, "body": {"resources": []}}
373 | self.mock_client.command.return_value = mock_response
374 |
375 | # Call search_hosts
376 | result = self.module.search_hosts(filter="platform_name:'Mac'")
377 |
378 | # Verify result
379 | self.assertEqual(len(result), 0)
380 |
381 | # Verify filter was applied correctly
382 | first_call = self.mock_client.command.call_args_list[0]
383 | self.assertEqual(first_call[1]["parameters"]["filter"], "platform_name:'Mac'")
384 |
385 |
386 | if __name__ == "__main__":
387 | unittest.main()
388 |
```
--------------------------------------------------------------------------------
/tests/e2e/utils/base_e2e_test.py:
--------------------------------------------------------------------------------
```python
1 | """Base class for E2E tests."""
2 |
3 | import asyncio
4 | import atexit
5 | import json
6 | import os
7 | import threading
8 | import time
9 | import unittest
10 | from typing import Any
11 | from unittest.mock import MagicMock, patch
12 |
13 | from dotenv import load_dotenv
14 | from langchain_openai import ChatOpenAI
15 | from mcp_use import MCPAgent, MCPClient
16 |
17 | from falcon_mcp.server import FalconMCPServer
18 |
19 | # Load environment variables from .env file for local development
20 | load_dotenv()
21 |
22 | # Default models to test against
23 | DEFAULT_MODELS_TO_TEST = ["gpt-4.1-mini", "gpt-4o-mini"]
24 | # Default number of times to run each test
25 | DEFAULT_RUNS_PER_TEST = 2
26 | # Default success threshold for passing a test
27 | DEFAULT_SUCCESS_THRESHOLD = 0.7
28 | 
29 | # Models to test against
30 | MODELS_TO_TEST = os.getenv("MODELS_TO_TEST", ",".join(DEFAULT_MODELS_TO_TEST)).split(
31 |     ","
32 | )
33 | # Number of times to run each test
34 | RUNS_PER_TEST = int(os.getenv("RUNS_PER_TEST", str(DEFAULT_RUNS_PER_TEST)))
35 | # Success threshold for passing a test
36 | SUCCESS_THRESHOLD = float(os.getenv("SUCCESS_THRESHOLD", str(DEFAULT_SUCCESS_THRESHOLD)))
37 |
38 |
39 | # Module-level singleton for shared server resources
40 | class SharedTestServer:
41 | """Singleton class to manage shared test server resources."""
42 |
43 | instance = None
44 | initialized = False
45 |
46 | def __new__(cls):
47 | if cls.instance is None:
48 | cls.instance = super().__new__(cls)
49 | return cls.instance
50 |
51 | def __init__(self):
52 | if not self.initialized:
53 | # Group server-related attributes
54 | self.server_config = {
55 | "thread": None,
56 | "client": None,
57 | "loop": None,
58 | }
59 |
60 | # Group patching-related attributes
61 | self.patchers = {
62 | "env": None,
63 | "api": None,
64 | "mock_api_instance": None,
65 | }
66 |
67 | # Group test configuration
68 | self.test_config = {
69 | "results": [],
70 | "verbosity_level": 0,
71 | "base_url": os.getenv("OPENAI_BASE_URL"),
72 | "models_to_test": MODELS_TO_TEST,
73 | }
74 |
75 | self._cleanup_registered = False
76 |
77 | def initialize(self):
78 | """Initialize the shared server and test environment."""
79 | if self.initialized:
80 | return
81 |
82 | print("Initializing shared FalconMCP server for E2E tests...")
83 |
84 | self.server_config["loop"] = asyncio.new_event_loop()
85 | asyncio.set_event_loop(self.server_config["loop"])
86 |
87 | self.patchers["env"] = patch.dict(
88 | os.environ,
89 | {
90 | "FALCON_CLIENT_ID": "test-client-id",
91 | "FALCON_CLIENT_SECRET": "test-client-secret",
92 | "FALCON_BASE_URL": "https://api.test.crowdstrike.com",
93 | "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "test-openai-key"),
94 | },
95 | )
96 | self.patchers["env"].start()
97 |
98 | self.patchers["api"] = patch("falcon_mcp.client.APIHarnessV2")
99 | mock_apiharness_class = self.patchers["api"].start()
100 |
101 | self.patchers["mock_api_instance"] = MagicMock()
102 | self.patchers["mock_api_instance"].login.return_value = True
103 | self.patchers["mock_api_instance"].token_valid.return_value = True
104 | mock_apiharness_class.return_value = self.patchers["mock_api_instance"]
105 |
106 | server = FalconMCPServer(debug=False)
107 | self.server_config["thread"] = threading.Thread(
108 | target=server.run, args=("sse",)
109 | )
110 | self.server_config["thread"].daemon = True
111 | self.server_config["thread"].start()
112 | time.sleep(2) # Wait for the server to initialize
113 |
114 | server_config = {"mcpServers": {"falcon": {"url": "http://127.0.0.1:8000/sse"}}}
115 | self.server_config["client"] = MCPClient(config=server_config)
116 |
117 | self.__class__.initialized = True
118 |
119 | # Register cleanup function to run when Python exits (only once)
120 | if not self._cleanup_registered:
121 | atexit.register(self.cleanup)
122 | self._cleanup_registered = True
123 |
124 | print("Shared FalconMCP server initialized successfully.")
125 |
126 | def cleanup(self):
127 | """Clean up the shared server and test environment."""
128 | if not self.initialized:
129 | return
130 |
131 | print("Cleaning up shared FalconMCP server...")
132 |
133 | try:
134 | # Write test results to file
135 | with open("test_results.json", "w", encoding="utf-8") as f:
136 | json.dump(self.test_config["results"], f, indent=4)
137 |
138 | if self.patchers["api"]:
139 | try:
140 | self.patchers["api"].stop()
141 | except (RuntimeError, AttributeError) as e:
142 | print(f"Warning: API patcher cleanup error: {e}")
143 |
144 | if self.patchers["env"]:
145 | try:
146 | self.patchers["env"].stop()
147 | except (RuntimeError, AttributeError) as e:
148 | print(f"Warning: Environment patcher cleanup error: {e}")
149 |
150 | if (
151 | self.server_config["loop"]
152 | and not self.server_config["loop"].is_closed()
153 | ):
154 | try:
155 | self.server_config["loop"].close()
156 | asyncio.set_event_loop(None)
157 | except RuntimeError as e:
158 | print(f"Warning: Event loop cleanup error: {e}")
159 |
160 | # Reset state
161 | self.__class__.initialized = False
162 | self._cleanup_registered = False
163 |
164 | print("Shared FalconMCP server cleanup completed.")
165 | except (IOError, OSError) as e:
166 | print(f"Error during cleanup: {e}")
167 | # Still reset the state even if cleanup partially failed
168 | self.__class__.initialized = False
169 | self._cleanup_registered = False
170 |
171 |
172 | # Global singleton instance
173 | _shared_server = SharedTestServer()
174 |
175 |
176 | def ensure_dict(data: Any) -> dict:
177 | """
178 |     Return the input if it is a dict; otherwise, attempt to convert it to a dict using json.loads.
179 | """
180 | if isinstance(data, dict):
181 | return data
182 | return json.loads(data)
183 |
184 |
185 | class BaseE2ETest(unittest.TestCase):
186 | """
187 | Base class for end-to-end tests for the Falcon MCP Server.
188 |
189 | This class sets up a live server in a separate thread, mocks the Falcon API,
190 | and provides helper methods for running tests with an MCP client and agent.
191 |
192 | The server is shared across all test classes that inherit from this base class.
193 | """
194 |
195 | def __init__(self, *args, **kwargs):
196 | super().__init__(*args, **kwargs)
197 | self.llm = None
198 | self.agent = None
199 |
200 | @classmethod
201 | def setUpClass(cls):
202 | """Set up the test environment for the entire class."""
203 | # Initialize the shared server
204 | _shared_server.initialize()
205 |
206 | # Set instance variables to point to shared resources
207 | cls.test_results = _shared_server.test_config["results"]
208 | cls._server_thread = _shared_server.server_config["thread"]
209 | cls._env_patcher = _shared_server.patchers["env"]
210 | cls._api_patcher = _shared_server.patchers["api"]
211 | cls._mock_api_instance = _shared_server.patchers["mock_api_instance"]
212 | cls.models_to_test = _shared_server.test_config["models_to_test"]
213 | cls.base_url = _shared_server.test_config["base_url"]
214 | cls.verbosity_level = _shared_server.test_config["verbosity_level"]
215 | cls.client = _shared_server.server_config["client"]
216 | cls.loop = _shared_server.server_config["loop"]
217 |
218 | @classmethod
219 | def tearDownClass(cls):
220 | """Tear down the test environment for the current class."""
221 |         # Don't clean up here - let atexit handle it
222 |
223 | def setUp(self):
224 | """Set up test fixtures before each test method."""
225 | self.assertTrue(
226 | self._server_thread.is_alive(), "Server thread did not start correctly."
227 | )
228 | self._mock_api_instance.reset_mock()
229 |
230 | async def _run_agent_stream(self, prompt: str) -> tuple[list, str]:
231 | """
232 | Run the agent stream for a given prompt and return the tools used and the final result.
233 |
234 | Args:
235 | prompt: The input prompt to send to the agent.
236 |
237 | Returns:
238 | A tuple containing the list of tool calls and the final string result from the agent.
239 | """
240 | result = ""
241 | tools = []
242 | await self.agent.initialize()
243 | async for event in self.agent.stream_events(prompt, manage_connector=False):
244 | event_type = event.get("event")
245 | data = event.get("data", {})
246 | name = event.get("name")
247 |
248 | if event_type == "on_tool_end" and name == "use_tool_from_server":
249 | tools.append(data)
250 | elif event_type == "on_chat_model_stream" and data.get("chunk"):
251 | result += str(data["chunk"].content)
252 | return tools, result
253 |
254 | def run_test_with_retries(
255 | self,
256 | test_name: str,
257 | test_logic_coro: callable,
258 | assertion_logic: callable,
259 | ):
260 | """
261 | Run a given test logic multiple times against different models and check for a success threshold.
262 |
263 | Args:
264 | test_name: The name of the test being run.
265 | test_logic_coro: An asynchronous function that runs the agent and returns tools and result.
266 | assertion_logic: A function that takes tools and result and performs assertions.
267 | """
268 | # Extract module name from the test class name
269 | module_name = self._get_module_name()
270 | success_count = 0
271 | total_runs = len(self.models_to_test) * RUNS_PER_TEST
272 |
273 | for model_name in self.models_to_test:
274 | self._setup_model_and_agent(model_name)
275 | success_count += self._run_model_tests(
276 | test_name, module_name, model_name, test_logic_coro, assertion_logic
277 | )
278 |
279 | self._assert_success_threshold(success_count, total_runs)
280 |
281 | def _setup_model_and_agent(self, model_name: str):
282 | """Set up the LLM and agent for a specific model."""
283 | # Initialize ChatOpenAI with base_url only if it's provided
284 | kwargs = {"model": model_name, "temperature": 0.7}
285 | if self.base_url:
286 | kwargs["base_url"] = self.base_url
287 |
288 | self.llm = ChatOpenAI(**kwargs)
289 |
290 | # Set agent verbosity based on pytest verbosity
291 | verbose_mode = self.verbosity_level > 0
292 | self.agent = MCPAgent(
293 | llm=self.llm,
294 | client=self.client,
295 | max_steps=20,
296 | verbose=verbose_mode,
297 | use_server_manager=True,
298 | memory_enabled=False,
299 | )
300 |
301 | def _run_model_tests(
302 | self,
303 | test_name: str,
304 | module_name: str,
305 | model_name: str,
306 | test_logic_coro: callable,
307 | assertion_logic: callable,
308 | ) -> int:
309 | """Run tests for a specific model and return success count."""
310 | model_success_count = 0
311 |
312 | for i in range(RUNS_PER_TEST):
313 | print(
314 | f"Running test {test_name} with model {model_name}, try {i + 1}/{RUNS_PER_TEST}"
315 | )
316 | run_result = {
317 | "test_name": test_name,
318 | "module_name": module_name,
319 | "model_name": model_name,
320 | "run_number": i + 1,
321 | "status": "failure",
322 | "failure_reason": None,
323 | "tools_used": None,
324 | "agent_result": None,
325 | }
326 |
327 | try:
328 | # Each test logic run needs a clean slate.
329 | self._mock_api_instance.reset_mock()
330 | tools, result = self.loop.run_until_complete(test_logic_coro())
331 | run_result.update(
332 | {
333 | "tools_used": tools,
334 | "agent_result": result,
335 | }
336 | )
337 |
338 | assertion_logic(tools, result)
339 | run_result["status"] = "success"
340 | model_success_count += 1
341 | except AssertionError as e:
342 | run_result["failure_reason"] = f"Assertion failed: {str(e)}"
343 | print(f"Assertion failed with model {model_name}, try {i + 1}: {e}")
344 | except Exception as e:
345 | # Catch any other exception that might occur during agent streaming or test execution
346 | # fmt: off
347 | run_result["failure_reason"] = f"Test execution failed: {type(e).__name__}: {str(e)}"
348 | print(f"Test execution failed with model {model_name}, try {i + 1}: {type(e).__name__}: {e}")
349 | finally:
350 | self.test_results.append(run_result)
351 |
352 | return model_success_count
353 |
354 | def _assert_success_threshold(self, success_count: int, total_runs: int):
355 | """Assert that the success rate meets the threshold."""
356 | success_rate = success_count / total_runs if total_runs > 0 else 0
357 | print(f"Success rate: {success_rate * 100:.2f}% ({success_count}/{total_runs})")
358 | self.assertGreaterEqual(
359 | success_rate,
360 | SUCCESS_THRESHOLD,
361 | f"Success rate of {success_rate * 100:.2f}% is below the required {SUCCESS_THRESHOLD * 100:.2f}% threshold.",
362 | )
363 |
364 | def _get_module_name(self) -> str:
365 | """
366 | Extract the module name from the test class name.
367 | Expected pattern: Test{ModuleName}ModuleE2E -> {ModuleName}
368 | """
369 | class_name = self.__class__.__name__
370 | # Remove 'Test' prefix and 'ModuleE2E' suffix
371 | if class_name.startswith("Test") and class_name.endswith("ModuleE2E"):
372 | module_name = class_name[
373 | 4:-9
374 | ] # Remove 'Test' (4 chars) and 'ModuleE2E' (9 chars)
375 | return module_name
376 |
377 | # Fallback: use the class name as-is if it doesn't match the expected pattern
378 | return class_name
379 |
380 | def _create_mock_api_side_effect(self, fixtures: list) -> callable:
381 | """Create a side effect function for the `mock API` based on a list of fixtures."""
382 |
383 | def mock_api_side_effect(operation: str, **kwargs: dict) -> dict:
384 | print(f"Mock API called with: operation={operation}, kwargs={kwargs}")
385 | for fixture in fixtures:
386 | if fixture["operation"] == operation and fixture["validator"](kwargs):
387 | print(
388 | f"Found matching fixture for {operation}, returning {fixture['response']}"
389 | )
390 | return fixture["response"]
391 | print(f"No matching fixture found for {operation}")
392 | return {"status_code": 200, "body": {"resources": []}}
393 |
394 | return mock_api_side_effect
395 |
```
--------------------------------------------------------------------------------
/tests/e2e/modules/test_incidents.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | E2E tests for the Incidents module.
3 | """
4 |
5 | import json
6 | import unittest
7 |
8 | import pytest
9 |
10 | from tests.e2e.utils.base_e2e_test import BaseE2ETest, ensure_dict
11 |
12 |
13 | @pytest.mark.e2e
14 | class TestIncidentsModuleE2E(BaseE2ETest):
15 | """
16 | End-to-end test suite for the Falcon MCP Server Incidents Module.
17 | """
18 |
19 | def test_crowd_score_default_parameters(self):
20 | """Verify the agent can retrieve CrowdScore with default parameters."""
21 |
22 | async def test_logic():
23 | fixtures = [
24 | {
25 | "operation": "CrowdScore",
26 | "validator": lambda kwargs: kwargs.get("parameters", {}).get(
27 | "limit"
28 | )
29 | == 100,
30 | "response": {
31 | "status_code": 200,
32 | "body": {
33 | "resources": [
34 | {"id": "score-1", "score": 50, "adjusted_score": 60},
35 | {"id": "score-2", "score": 70, "adjusted_score": 80},
36 | {"id": "score-3", "score": 40, "adjusted_score": 50},
37 | ]
38 | },
39 | },
40 | }
41 | ]
42 |
43 | self._mock_api_instance.command.side_effect = (
44 | self._create_mock_api_side_effect(fixtures)
45 | )
46 |
47 | prompt = "What is our current CrowdScore?"
48 | return await self._run_agent_stream(prompt)
49 |
50 | def assertions(tools, result):
51 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
52 | used_tool = tools[len(tools) - 1]
53 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_show_crowd_score")
54 |
55 | # Verify the output contains the expected data
56 | output = json.loads(used_tool["output"])
57 | self.assertEqual(
58 | output["average_score"], 53
59 | ) # (50+70+40)/3 = 53.33 rounded to 53
60 | self.assertEqual(
61 | output["average_adjusted_score"], 63
62 | ) # (60+80+50)/3 = 63.33 rounded to 63
63 | self.assertEqual(len(output["scores"]), 3)
64 |
65 | # Verify API call parameters
66 | self.assertGreaterEqual(
67 | self._mock_api_instance.command.call_count,
68 | 1,
69 | "Expected at least 1 API call",
70 | )
71 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
72 | "parameters", {}
73 | )
74 | self.assertEqual(api_call_params.get("limit"), 100) # Default limit
75 | self.assertEqual(api_call_params.get("offset"), 0) # Default offset
76 |
77 | # Verify result contains CrowdScore information
78 | self.assertIn("CrowdScore", result)
79 | self.assertIn("53", result) # Average score should be mentioned
80 |
81 | self.run_test_with_retries(
82 | "test_crowd_score_default_parameters", test_logic, assertions
83 | )
84 |
85 | def test_search_incidents_with_filter(self):
86 | """Verify the agent can search for incidents with a filter."""
87 |
88 | async def test_logic():
89 | fixtures = [
90 | {
91 | "operation": "QueryIncidents",
92 | "validator": lambda kwargs: "state:'open'"
93 | in kwargs.get("parameters", {}).get("filter", ""),
94 | "response": {
95 | "status_code": 200,
96 | "body": {"resources": ["incident-1", "incident-2"]},
97 | },
98 | },
99 | {
100 | "operation": "GetIncidents",
101 | "validator": lambda kwargs: "incident-1"
102 | in kwargs.get("body", {}).get("ids", []),
103 | "response": {
104 | "status_code": 200,
105 | "body": {
106 | "resources": [
107 | {
108 | "id": "incident-1",
109 | "name": "Test Incident 1",
110 | "description": "This is a test incident",
111 | "status": 20, # New
112 | "state": "open",
113 | "final_score": 80,
114 | "start": "2023-01-01T00:00:00Z",
115 | "end": "2023-01-02T00:00:00Z",
116 | },
117 | {
118 | "id": "incident-2",
119 | "name": "Test Incident 2",
120 | "description": "This is another test incident",
121 | "status": 30, # In Progress
122 | "state": "open",
123 | "final_score": 65,
124 | "start": "2023-01-03T00:00:00Z",
125 | "end": "2023-01-04T00:00:00Z",
126 | },
127 | ]
128 | },
129 | },
130 | },
131 | ]
132 |
133 | self._mock_api_instance.command.side_effect = (
134 | self._create_mock_api_side_effect(fixtures)
135 | )
136 |
137 | prompt = "Find all open incidents"
138 | return await self._run_agent_stream(prompt)
139 |
140 | def assertions(tools, result):
141 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
142 | used_tool = tools[len(tools) - 1]
143 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_incidents")
144 |
145 | # Verify the tool input contains the filter
146 | tool_input = ensure_dict(used_tool["input"]["tool_input"])
147 | self.assertIn("open", tool_input.get("filter", "").lower())
148 |
149 | # Verify API call parameters
150 | self.assertGreaterEqual(
151 | self._mock_api_instance.command.call_count,
152 | 2,
153 | "Expected at least 2 API calls",
154 | )
155 |
156 | # Check QueryIncidents call
157 | api_call_1_params = self._mock_api_instance.command.call_args_list[0][
158 | 1
159 | ].get("parameters", {})
160 | self.assertIn("state:'open'", api_call_1_params.get("filter", ""))
161 |
162 | # Check GetIncidents call
163 | api_call_2_body = self._mock_api_instance.command.call_args_list[1][1].get(
164 | "body", {}
165 | )
166 | self.assertEqual(api_call_2_body.get("ids"), ["incident-1", "incident-2"])
167 |
168 | # Verify result contains incident information
169 | self.assertIn("incident-1", result)
170 | self.assertIn("Test Incident 1", result)
171 | self.assertIn("incident-2", result)
172 | self.assertIn("Test Incident 2", result)
173 |
174 | self.run_test_with_retries(
175 | "test_search_incidents_with_filter", test_logic, assertions
176 | )
177 |
178 | def test_get_incident_details(self):
179 | """Verify the agent can get details for specific incidents."""
180 |
181 | async def test_logic():
182 | fixtures = [
183 | {
184 | "operation": "GetIncidents",
185 | "validator": lambda kwargs: "incident-3"
186 | in kwargs.get("body", {}).get("ids", []),
187 | "response": {
188 | "status_code": 200,
189 | "body": {
190 | "resources": [
191 | {
192 | "id": "incident-3",
193 | "name": "High Priority Incident",
194 | "description": "Critical security incident requiring immediate attention",
195 | "status": 30, # In Progress
196 | "state": "open",
197 | "final_score": 95,
198 | "start": "2023-02-01T00:00:00Z",
199 | "end": "2023-02-02T00:00:00Z",
200 | "tags": ["Critical", "Security Breach"],
201 | "host_ids": ["host-1", "host-2"],
202 | }
203 | ]
204 | },
205 | },
206 | }
207 | ]
208 |
209 | self._mock_api_instance.command.side_effect = (
210 | self._create_mock_api_side_effect(fixtures)
211 | )
212 |
213 | prompt = "Get details for incident with ID incident-3"
214 | return await self._run_agent_stream(prompt)
215 |
216 | def assertions(tools, result):
217 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
218 | used_tool = tools[len(tools) - 1]
219 | self.assertEqual(
220 | used_tool["input"]["tool_name"], "falcon_get_incident_details"
221 | )
222 |
223 | # Verify the tool input contains the incident ID
224 | tool_input = ensure_dict(used_tool["input"]["tool_input"])
225 | self.assertIn("incident-3", tool_input.get("ids", []))
226 |
227 | # Verify API call parameters
228 | self.assertGreaterEqual(
229 | self._mock_api_instance.command.call_count,
230 | 1,
231 | "Expected at least 1 API call",
232 | )
233 | api_call_body = self._mock_api_instance.command.call_args_list[0][1].get(
234 | "body", {}
235 | )
236 | self.assertEqual(api_call_body.get("ids"), ["incident-3"])
237 |
238 | # Verify result contains incident information
239 | self.assertIn("incident-3", result)
240 | self.assertIn("High Priority Incident", result)
241 | self.assertIn("Critical security incident", result)
242 | self.assertIn("95", result) # Score
243 |
244 | self.run_test_with_retries("test_get_incident_details", test_logic, assertions)
245 |
246 | def test_search_behaviors(self):
247 | """Verify the agent can search for behaviors with a filter."""
248 |
249 | async def test_logic():
250 | fixtures = [
251 | {
252 | "operation": "QueryBehaviors",
253 | "validator": lambda kwargs: "tactic:'Defense Evasion'"
254 | in kwargs.get("parameters", {}).get("filter", ""),
255 | "response": {
256 | "status_code": 200,
257 | "body": {"resources": ["behavior-1", "behavior-2"]},
258 | },
259 | },
260 | {
261 | "operation": "GetBehaviors",
262 | "validator": lambda kwargs: "behavior-1"
263 | in kwargs.get("body", {}).get("ids", []),
264 | "response": {
265 | "status_code": 200,
266 | "body": {
267 | "resources": [
268 | {
269 | "id": "behavior-1",
270 | "tactic": "Defense Evasion",
271 | },
272 | {
273 | "id": "behavior-2",
274 | "tactic": "Defense Evasion",
275 | },
276 | ]
277 | },
278 | },
279 | },
280 | ]
281 |
282 | self._mock_api_instance.command.side_effect = (
283 | self._create_mock_api_side_effect(fixtures)
284 | )
285 |
286 | prompt = "Find behaviors with the tactic 'Defense Evasion'"
287 | return await self._run_agent_stream(prompt)
288 |
289 | def assertions(tools, result):
290 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
291 | used_tool = tools[len(tools) - 1]
292 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_behaviors")
293 |
294 | # Verify the tool input contains the filter
295 | tool_input = ensure_dict(used_tool["input"]["tool_input"])
296 | self.assertIn("tactic", tool_input.get("filter", "").lower())
297 |
298 | # Verify API call parameters
299 | self.assertGreaterEqual(
300 | self._mock_api_instance.command.call_count,
301 | 2,
302 | "Expected at least 2 API calls",
303 | )
304 |
305 | # Check QueryBehaviors call
306 | api_call_1_params = self._mock_api_instance.command.call_args_list[0][
307 | 1
308 | ].get("parameters", {})
309 | self.assertIn(
310 | "tactic:'Defense Evasion'", api_call_1_params.get("filter", "")
311 | )
312 |
313 | # Check GetBehaviors call
314 | api_call_2_body = self._mock_api_instance.command.call_args_list[1][1].get(
315 | "body", {}
316 | )
317 | self.assertEqual(api_call_2_body.get("ids"), ["behavior-1", "behavior-2"])
318 |
319 | # Verify result contains behavior information
320 | self.assertIn("behavior-1", result)
321 | self.assertIn("behavior-2", result)
322 | self.assertIn("Defense Evasion", result)
323 |
324 | self.run_test_with_retries("test_search_behaviors", test_logic, assertions)
325 |
326 | def test_get_behavior_details(self):
327 | """Verify the agent can get details for specific behaviors."""
328 |
329 | async def test_logic():
330 | fixtures = [
331 | {
332 | "operation": "GetBehaviors",
333 | "validator": lambda kwargs: "behavior-3"
334 | in kwargs.get("body", {}).get("ids", []),
335 | "response": {
336 | "status_code": 200,
337 | "body": {
338 | "resources": [
339 | {
340 | "id": "behavior-3",
341 | "tactic": "Exfiltration",
342 | }
343 | ]
344 | },
345 | },
346 | }
347 | ]
348 |
349 | self._mock_api_instance.command.side_effect = (
350 | self._create_mock_api_side_effect(fixtures)
351 | )
352 |
353 | prompt = "Get details for behavior with ID behavior-3"
354 | return await self._run_agent_stream(prompt)
355 |
356 | def assertions(tools, result):
357 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
358 | used_tool = tools[len(tools) - 1]
359 | self.assertEqual(
360 | used_tool["input"]["tool_name"], "falcon_get_behavior_details"
361 | )
362 |
363 | # Verify the tool input contains the behavior ID
364 | tool_input = ensure_dict(used_tool["input"]["tool_input"])
365 | self.assertIn("behavior-3", tool_input.get("ids", []))
366 |
367 | # Verify API call parameters
368 | self.assertGreaterEqual(
369 | self._mock_api_instance.command.call_count,
370 | 1,
371 | "Expected at least 1 API call",
372 | )
373 | api_call_body = self._mock_api_instance.command.call_args_list[0][1].get(
374 | "body", {}
375 | )
376 | self.assertEqual(api_call_body.get("ids"), ["behavior-3"])
377 |
378 | # Verify result contains behavior information
379 | self.assertIn("behavior-3", result)
380 | self.assertIn("Exfiltration", result)
381 |
382 | self.run_test_with_retries("test_get_behavior_details", test_logic, assertions)
383 |
384 |
385 | if __name__ == "__main__":
386 | unittest.main()
387 |
```
--------------------------------------------------------------------------------
/docs/module_development.md:
--------------------------------------------------------------------------------
```markdown
1 | # Falcon MCP Server Module Development Guide
2 |
3 | This guide provides instructions for implementing new modules for the Falcon MCP server.
4 |
5 | ## Module Structure
6 |
7 | Each module should:
8 |
9 | 1. Inherit from the `BaseModule` class
10 | 2. Implement the `register_tools` method
11 | 3. Define tool methods that interact with the Falcon API
12 | 4. Use common utilities for configuration, logging, error handling, and API interactions
13 |
14 | ## Step-by-Step Implementation Guide
15 |
16 | ### 1. Create a New Module File
17 |
18 | Create a new file in the `falcon_mcp/modules` directory:
19 |
20 | ```python
21 | """
22 | [Module Name] module for Falcon MCP Server
23 |
24 | This module provides tools for [brief description].
25 | """
26 | from typing import Dict, List, Optional, Any
27 |
28 | from mcp.server import FastMCP
29 |
30 | from falcon_mcp.common.logging import get_logger
31 | from falcon_mcp.common.errors import handle_api_response
32 | from falcon_mcp.common.utils import prepare_api_parameters, extract_first_resource
33 | from falcon_mcp.modules.base import BaseModule
34 |
35 |
36 | class YourModule(BaseModule):
37 | """Module for [description]."""
38 |
39 | def register_tools(self, server: FastMCP) -> None:
40 | """Register tools with the MCP server.
41 |
42 | Args:
43 | server: MCP server instance
44 | """
45 | # Register tools
46 | self._add_tool(
47 | server=server,
48 | method=self.your_tool_method,
49 | name="your_tool_name",
50 | )
51 |
52 | # Add more tools as needed
53 |
54 | def your_tool_method(self, param1: str, param2: Optional[int] = None) -> Dict[str, Any]:
55 | """Description of what your tool does.
56 |
57 | Args:
58 | param1: Description of param1
59 | param2: Description of param2
60 |
61 | Returns:
62 | Tool result description
63 | """
64 | # Prepare parameters
65 | params = prepare_api_parameters({
66 | "param1": param1,
67 | "param2": param2,
68 | })
69 |
70 | # Define the operation name (used for error handling)
71 | operation = "YourFalconAPIOperation"
72 |
73 | # Make the API request
74 | response = self.client.command(operation, parameters=params)
75 |
76 | # Handle the response
77 | return handle_api_response(
78 | response,
79 | operation=operation,
80 | error_message="Failed to perform operation",
81 | default_result={},
82 | )
83 | ```
84 |
85 | ### 2. Update API Scope Requirements
86 |
87 | Add your API operations to the `API_SCOPE_REQUIREMENTS` dictionary in `falcon_mcp/common/errors.py`:
88 |
89 | ```python
90 | API_SCOPE_REQUIREMENTS = {
91 | # Existing operations...
92 | "YourFalconAPIOperation": ["required:scope"],
93 | # Add more operations as needed
94 | }
95 | ```
96 |
97 | ### 3. Module Auto-Discovery
98 |
99 | Modules are automatically discovered by the registry system. You don't need to call any registration functions or add imports:
100 |
101 | 1. Create your module class in the `falcon_mcp/modules` directory (e.g., `your_module.py`)
102 | 2. Make sure it inherits from `BaseModule`
103 | 3. **Modules are automatically discovered** - no manual imports or registration needed
104 |
105 | The server will automatically discover and register your module during initialization. The module name will be derived
106 | from the class name (e.g., `YourModule` becomes `your`).
107 |
108 | During server initialization, the registry system will:
109 |
110 | 1. Scan the modules directory using `pkgutil.iter_modules()`
111 | 2. Dynamically import each module file using `importlib.import_module()`
112 | 3. Find classes that end with "Module" (excluding BaseModule) via introspection
113 | 4. Register them in the `AVAILABLE_MODULES` dictionary
114 | 5. Make them available to the server
115 |
116 | This approach eliminates manual registration while maintaining a clean architecture that avoids cyclic imports.
117 |
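For illustration, here is a minimal sketch of that discovery flow. It is not the actual `falcon_mcp/registry.py` implementation; the function name and exact registration shape are assumptions based on the steps above.

```python
# Illustrative sketch only -- see falcon_mcp/registry.py for the real logic.
import importlib
import inspect
import pkgutil

from falcon_mcp import modules
from falcon_mcp.modules.base import BaseModule

AVAILABLE_MODULES = {}  # e.g. "your" -> YourModule


def discover_modules() -> None:
    """Scan falcon_mcp/modules and register every BaseModule subclass."""
    for module_info in pkgutil.iter_modules(modules.__path__):
        imported = importlib.import_module(f"falcon_mcp.modules.{module_info.name}")
        for name, obj in inspect.getmembers(imported, inspect.isclass):
            if name.endswith("Module") and name != "BaseModule" and issubclass(obj, BaseModule):
                # "YourModule" -> "your"
                AVAILABLE_MODULES[name.removesuffix("Module").lower()] = obj
```
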
118 | ### 4. Add Tests
119 |
120 | Create a test file in the `tests/modules` directory that inherits from the `TestModules` base class:
121 |
122 | ```python
123 | """
124 | Tests for the YourModule module.
125 | """
126 | from falcon_mcp.modules.your_module import YourModule
127 | from tests.modules.utils.test_modules import TestModules
128 |
129 |
130 | class TestYourModule(TestModules):
131 | """Test cases for the YourModule module."""
132 |
133 | def setUp(self):
134 | """Set up test fixtures."""
135 | self.setup_module(YourModule)
136 |
137 | def test_register_tools(self):
138 | """Test registering tools with the server."""
139 | expected_tools = [
140 | "falcon_your_tool_name",
141 | # Add other tools here
142 | ]
143 | self.assert_tools_registered(expected_tools)
144 |
145 | def test_your_tool_method(self):
146 | """Test your tool method."""
147 | # Setup mock response
148 | mock_response = {
149 | "status_code": 200,
150 | "body": {
151 | "resources": [{"id": "test", "name": "Test Resource"}]
152 | }
153 | }
154 | self.mock_client.command.return_value = mock_response
155 |
156 | # Call your tool method
157 | result = self.module.your_tool_method("test_param", 123)
158 |
159 | # Verify client command was called correctly
160 | self.mock_client.command.assert_called_once_with(
161 | "YourFalconAPIOperation",
162 | parameters={"param1": "test_param", "param2": 123}
163 | )
164 |
165 | # Verify result
166 | expected_result = [{"id": "test", "name": "Test Resource"}]
167 | self.assertEqual(result, expected_result)
168 |
169 | def test_your_tool_method_error(self):
170 | """Test your tool method with API error."""
171 | # Setup mock response with error
172 | mock_response = {
173 | "status_code": 403,
174 | "body": {
175 | "errors": [{"message": "Access denied"}]
176 | }
177 | }
178 | self.mock_client.command.return_value = mock_response
179 |
180 | # Call your tool method
181 | result = self.module.your_tool_method("test_param")
182 |
183 | # Verify result contains error
184 | self.assertIn("error", result)
185 | self.assertIn("details", result)
186 | ```
187 |
188 | The `TestModules` base class provides:
189 |
190 | 1. A `setup_module()` method that handles the common setup of mocking the client and server
191 | 2. An `assert_tools_registered()` helper method to verify tool registration
192 |
193 | This approach simplifies test code and ensures consistency across all module tests.
194 |
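For reference, a rough sketch of what such a base class can look like; the real helper lives in `tests/modules/utils/test_modules.py`, and the constructor call and attribute names here are assumptions.

```python
# Illustrative sketch only -- the actual TestModules helper may differ.
import unittest
from unittest import mock


class TestModules(unittest.TestCase):
    """Shared setup for module tests (sketch)."""

    def setup_module(self, module_cls):
        # Mock the Falcon client and the MCP server the module registers against.
        self.mock_client = mock.MagicMock()
        self.mock_server = mock.MagicMock()
        self.module = module_cls(self.mock_client)
        self.module.register_tools(self.mock_server)
```
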
195 | ## Contributing Module Changes
196 |
197 | When contributing new modules or changes to existing modules, please follow these guidelines:
198 |
199 | ### Conventional Commits for Modules
200 |
201 | This project uses [Conventional Commits](https://www.conventionalcommits.org/) for automated releases and clear commit history. When contributing module-related changes, use these commit message patterns:
202 |
203 | **Adding New Modules:**
204 |
205 | ```bash
206 | git commit -m "feat(modules): add [module-name] module for [functionality]"
207 | # Examples:
208 | git commit -m "feat(modules): add spotlight module for vulnerability management"
209 | git commit -m "feat(modules): add intel module for threat intelligence analysis"
210 | ```
211 |
212 | **Adding Tools to Existing Modules (Preferred - More Specific Scoping):**
213 |
214 | ```bash
215 | git commit -m "feat(modules/[module-name]): add [specific-functionality]"
216 | # Examples:
217 | git commit -m "feat(modules/cloud): add list kubernetes clusters tool"
218 | git commit -m "feat(modules/hosts): add list devices tool"
219 | git commit -m "feat(modules/detections): add advanced filtering capability"
220 | ```
221 |
222 | **Modifying Existing Modules:**
223 |
224 | ```bash
225 | git commit -m "feat(modules/[module-name]): enhance [specific-functionality]"
226 | git commit -m "fix(modules/[module-name]): resolve [specific-issue]"
227 | # Examples:
228 | git commit -m "feat(modules/detections): enhance FQL filtering with new operators"
229 | git commit -m "fix(modules/hosts): resolve authentication error in search function"
230 | ```
231 |
232 | **General Module Changes (Less Preferred but Acceptable):**
233 |
234 | ```bash
235 | git commit -m "feat(modules): enhance [module-name] with [new-functionality]"
236 | git commit -m "fix(modules): resolve [issue] in [module-name] module"
237 | # Examples:
238 | git commit -m "feat(modules): enhance detections module with FQL filtering"
239 | git commit -m "fix(modules): resolve authentication error in hosts module"
240 | ```
241 |
242 | **Module Tests and Documentation:**
243 |
244 | ```bash
245 | git commit -m "test(modules): add comprehensive tests for [module-name] module"
246 | git commit -m "docs(modules): update [module-name] module documentation"
247 | ```
248 |
249 | See the main [CONTRIBUTING.md](CONTRIBUTING.md) guide for complete conventional commits guidelines.
250 |
251 | ## Best Practices
252 |
253 | ### Error Handling
254 |
255 | 1. **Use Common Error Utilities**: Always use `handle_api_response` for API responses instead of manual status code checks
256 | 2. **Provide Operation Names**: Include the operation name for better error messages and permission handling
257 | 3. **Custom Error Messages**: Use descriptive error messages for each operation
258 | 4. **Consistent Error Format**: Ensure error responses follow the standard format with `error` and optional `details` fields
259 |
260 | Example of proper error handling:
261 |
262 | ```python
263 | # Make the API request
264 | response = self.client.command(operation, parameters=params)
265 |
266 | # Use handle_api_response to process the response
267 | result = handle_api_response(
268 | response,
269 | operation=operation,
270 | error_message="Failed to perform operation",
271 | default_result=[]
272 | )
273 |
274 | # Check if the result is an error response
275 | if isinstance(result, dict) and "error" in result:
276 | # Handle error case
277 | return result # or wrap it in a list if returning to a tool expecting a list
278 | ```
279 |
280 | ### Parameter Handling
281 |
282 | 1. **Use prepare_api_parameters**: Filter out None values and format parameters
283 | 2. **Type Annotations**: Always include type annotations for parameters and return values
284 | 3. **Default Values**: Provide sensible defaults for optional parameters
285 |
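For illustration, a small sketch of these practices together; it assumes `prepare_api_parameters` simply drops `None` values, as described in the utilities reference below.

```python
# Illustrative sketch: assumes prepare_api_parameters drops None values.
from typing import Any, Dict, Optional

from falcon_mcp.common.utils import prepare_api_parameters


def build_query_params(query: Optional[str] = None, limit: int = 100) -> Dict[str, Any]:
    """Build API parameters, omitting unset optional arguments."""
    return prepare_api_parameters({
        "filter": query,  # dropped from the result when query is None
        "limit": limit,
    })

# build_query_params(limit=50)            -> {"limit": 50}
# build_query_params("platform:'Linux'")  -> {"filter": "platform:'Linux'", "limit": 100}
```
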
286 | ### Response Processing
287 |
288 | 1. **Use extract_resources**: Extract resources from API responses
289 | 2. **Handle Empty Results**: Provide appropriate defaults for empty results
290 | 3. **Return Structured Data**: Return well-structured data that follows consistent patterns
291 |
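A matching sketch for response processing; the exact `extract_resources` signature (including the `default` argument) is an assumption, so check `falcon_mcp/common/utils.py` for the real one.

```python
# Illustrative sketch: assumes extract_resources(response, default=...) returns
# the "resources" list from the response body, or the default when it is missing.
from typing import Any, Dict, List

from falcon_mcp.common.utils import extract_resources


def list_hostnames(response: Dict[str, Any]) -> List[str]:
    devices = extract_resources(response, default=[])  # empty list, not None
    return [device.get("hostname", "unknown") for device in devices]
```
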
292 | ### Documentation
293 |
294 | 1. **Docstrings**: Include detailed docstrings for all classes and methods. Tool descriptions are derived from method docstrings, so make sure they are comprehensive and well-written.
295 | 2. **Parameter Descriptions**: Document all parameters and return values
296 | 3. **Examples**: Include examples in docstrings where helpful
297 |
298 | ### Testing
299 |
300 | 1. **Test All Tools**: Write tests for all tools in your module
301 | 2. **Test Error Cases**: Include tests for error scenarios
302 | 3. **Mock API Responses**: Use mock responses for testing
303 |
304 | ## Common Utilities Reference
305 |
306 | ### Configuration (`falcon_mcp/common/config.py`)
307 |
308 | - `FalconConfig`: Configuration class for the Falcon MCP server
309 | - `load_config`: Load configuration from environment variables and arguments
310 |
311 | ### Logging (`falcon_mcp/common/logging.py`)
312 |
313 | - `configure_logging`: Configure logging for the Falcon MCP server
314 | - `get_logger`: Get a logger with the specified name
315 |
316 | ### Error Handling (`falcon_mcp/common/errors.py`)
317 |
318 | - `is_success_response`: Check if an API response indicates success
319 | - `get_required_scopes`: Get the required API scopes for a specific operation
320 | - `_format_error_response`: Format an error as a standardized response
321 | - `handle_api_response`: Handle an API response, returning either the result or an error
322 |
323 | ### Utilities (`falcon_mcp/common/utils.py`)
324 |
325 | - `filter_none_values`: Remove None values from a dictionary
326 | - `prepare_api_parameters`: Prepare parameters for Falcon API requests
327 | - `extract_resources`: Extract resources from an API response
328 | - `extract_first_resource`: Extract the first resource from an API response
329 |
330 | ## Example: Implementing a Hosts Module
331 |
332 | Here's an example of implementing a Hosts module that provides tools for accessing and managing hosts in the Falcon platform:
333 |
334 | ```python
335 | """
336 | Hosts module for Falcon MCP Server
337 |
338 | This module provides tools for accessing and managing CrowdStrike Falcon hosts.
339 | """
340 | from typing import Dict, List, Optional, Any
341 |
342 | from mcp.server import FastMCP
343 |
344 | from falcon_mcp.common.errors import handle_api_response
345 | from falcon_mcp.common.utils import prepare_api_parameters, extract_resources, extract_first_resource
346 | from falcon_mcp.modules.base import BaseModule
347 |
348 |
349 | class HostsModule(BaseModule):
350 | """Module for accessing and managing CrowdStrike Falcon hosts."""
351 |
352 | def register_tools(self, server: FastMCP) -> None:
353 | """Register tools with the MCP server.
354 |
355 | Args:
356 | server: MCP server instance
357 | """
358 | # Register tools
359 | self._add_tool(
360 | server=server,
361 | method=self.search_hosts,
362 | name="search_hosts",
363 | )
364 |
365 | self._add_tool(
366 | server=server,
367 | method=self.get_host_details,
368 | name="get_host_details",
369 | )
370 |
371 | self._add_tool(
372 | server=server,
373 | method=self.get_host_count,
374 | name="get_host_count",
375 | )
376 |
377 | def search_hosts(self, query: Optional[str] = None, limit: int = 100) -> List[Dict[str, Any]]:
378 | """Search for hosts in your CrowdStrike environment.
379 |
380 | Args:
381 | query: FQL query string to filter hosts
382 | limit: Maximum number of results to return
383 |
384 | Returns:
385 | List of host details
386 | """
387 | # Prepare parameters
388 | params = prepare_api_parameters({
389 | "filter": query,
390 | "limit": limit,
391 | })
392 |
393 | # Define the operation name
394 | operation = "QueryDevices"
395 |
396 | # Make the API request
397 | response = self.client.command(operation, parameters=params)
398 |
399 | # Handle the response
400 | host_ids = handle_api_response(
401 | response,
402 | operation=operation,
403 | error_message="Failed to search hosts",
404 | default_result=[],
405 | )
406 |
407 | # If we have host IDs, get the details for each one
408 | if host_ids:
409 | details_operation = "GetDeviceDetails"
410 | details_response = self.client.command(
411 | details_operation,
412 | body={"ids": host_ids}
413 | )
414 |
415 | return handle_api_response(
416 | details_response,
417 | operation=details_operation,
418 | error_message="Failed to get host details",
419 | default_result=[],
420 | )
421 |
422 | return []
423 |
424 | def get_host_details(self, host_id: str) -> Dict[str, Any]:
425 | """Get detailed information about a specific host.
426 |
427 | Args:
428 | host_id: The ID of the host to retrieve
429 |
430 | Returns:
431 | Host details
432 | """
433 | # Define the operation name
434 | operation = "GetDeviceDetails"
435 |
436 | # Make the API request
437 | response = self.client.command(
438 | operation,
439 | body={"ids": [host_id]},
440 | )
441 |
442 | # Extract the first resource
443 | return extract_first_resource(
444 | response,
445 | operation=operation,
446 | not_found_error="Host not found",
447 | )
448 |
449 | def get_host_count(self, query: Optional[str] = None) -> Dict[str, int]:
450 | """Get the count of hosts matching a query.
451 |
452 | Args:
453 | query: FQL query string to filter hosts
454 |
455 | Returns:
456 | Dictionary with host count
457 | """
458 | # Prepare parameters
459 | params = prepare_api_parameters({
460 | "filter": query,
461 | })
462 |
463 | # Define the operation name
464 | operation = "QueryDevices"
465 |
466 | # Make the API request
467 | response = self.client.command(operation, parameters=params)
468 |
469 | # Use handle_api_response to get host IDs
470 | host_ids = handle_api_response(
471 | response,
472 | operation=operation,
473 | error_message="Failed to get host count",
474 | default_result=[],
475 | )
476 |
477 | # If handle_api_response returns an error dict instead of a list,
478 | # it means there was an error, so we return it with a count of 0
479 | if isinstance(host_ids, dict) and "error" in host_ids:
480 | return {"count": 0, **host_ids}
481 |
482 | return {"count": len(host_ids)}
483 | ```
484 |
485 | Don't forget to update the `API_SCOPE_REQUIREMENTS` dictionary in `falcon_mcp/common/errors.py`:
486 |
487 | ```python
488 | API_SCOPE_REQUIREMENTS = {
489 | # Existing operations...
490 | "QueryDevices": ["hosts:read"],
491 | "GetDeviceDetails": ["hosts:read"],
492 | # Add more operations as needed
493 | }
494 | ```
495 |
496 | The module will be automatically discovered by the registry system - no manual imports or registration needed.
497 |
```
--------------------------------------------------------------------------------
/falcon_mcp/resources/intel.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Contains Intel resources.
3 | """
4 |
5 | from falcon_mcp.common.utils import generate_md_table
6 |
7 | QUERY_ACTOR_ENTITIES_FQL_FILTERS = [
8 | (
9 | "Name",
10 | "Type",
11 | "Description",
12 | ),
13 | (
14 | "id",
15 | "Number",
16 | """
17 | The adversary's ID.
18 |
19 | Ex: 2583
20 | """
21 | ),
22 | (
23 | "actor_type",
24 | "String",
25 | """
26 | The type of adversary.
27 |
28 | Ex: "targeted"
29 | """
30 | ),
31 | (
32 | "actors.id",
33 | "Number",
34 | """
35 | The ID of an associated actor.
36 |
37 | Ex: 1823
38 | """
39 | ),
40 | (
41 | "actors.name",
42 | "String",
43 | """
44 | The name of an associated actor.
45 |
46 | Ex: "VENOMOUS BEAR"
47 | """
48 | ),
49 | (
50 | "actors.slug",
51 | "String",
52 | """
53 | The URL-friendly identifier of an associated actor.
54 |
55 | Ex: "venomous-bear"
56 | """
57 | ),
58 | (
59 | "actors.url",
60 | "String",
61 | """
62 | The URL to the actor's profile page.
63 |
64 | Ex: "https://falcon.crowdstrike.com/intelligence/actors/venomous-bear/"
65 | """
66 | ),
67 | (
68 | "animal_classifier",
69 | "String",
70 | """
71 | The animal classification assigned to the adversary.
72 |
73 | Ex: "BEAR"
74 | """
75 | ),
76 | (
77 | "capability.value",
78 | "String",
79 | """
80 | The adversary's capability.
81 |
82 | Ex: "average"
83 | """
84 | ),
85 | (
86 | "created_date",
87 | "Timestamp",
88 | """
89 | Timestamp when the actor entity was created.
90 |
91 | Ex: 1441729727
92 | """
93 | ),
94 | (
95 | "description",
96 | "String",
97 | """
98 | A detailed description of the adversary.
99 |
100 | Ex: "VENOMOUS BEAR is a sophisticated Russia-based adversary..."
101 | """
102 | ),
103 | (
104 | "first_activity_date",
105 | "Timestamp",
106 | """
107 | First activity date.
108 |
109 | Ex: 1094660880
110 | """
111 | ),
112 | (
113 | "known_as",
114 | "String",
115 | """
116 | The adversary's alias.
117 |
118 | Ex: "dridex"
119 | """
120 | ),
121 | (
122 | "last_activity_date",
123 | "Timestamp",
124 | """
125 | Last activity date.
126 |
127 | Ex: 1749427200
128 | """
129 | ),
130 | (
131 | "last_modified_date",
132 | "Timestamp",
133 | """
134 | Timestamp when the actor entity was last modified.
135 |
136 | Ex: 1754320661
137 | """
138 | ),
139 | (
140 | "motivations.id",
141 | "Number",
142 | """
143 | The ID of a motivation associated with the adversary.
144 |
145 | Ex: 1001485
146 | """
147 | ),
148 | (
149 | "motivations.slug",
150 | "String",
151 | """
152 | The URL-friendly identifier of a motivation.
153 |
154 | Ex: "state-sponsored"
155 | """
156 | ),
157 | (
158 | "motivations.value",
159 | "String",
160 | """
161 | The display name of a motivation.
162 |
163 | Ex: "State-Sponsored"
164 | """
165 | ),
166 | (
167 | "name",
168 | "String",
169 | """
170 | The adversary's name.
171 |
172 | Ex: "FANCY BEAR"
173 | """
174 | ),
175 | (
176 | "origins.slug",
177 | "String",
178 | """
179 | The adversary's country of origin slug.
180 |
181 | Ex: "ru"
182 | """
183 | ),
184 | (
185 | "origins.value",
186 | "String",
187 | """
188 | The adversary's country of origin.
189 |
190 | Ex: "Afghanistan"
191 | """
192 | ),
193 | (
194 | "short_description",
195 | "String",
196 | """
197 | A truncated version of the adversary's description.
198 |
199 | Ex: "VENOMOUS BEAR is a sophisticated Russia-based adversary..."
200 | """
201 | ),
202 | (
203 | "slug",
204 | "String",
205 | """
206 | The URL-friendly identifier of the adversary.
207 |
208 | Ex: "fancy-bear"
209 | """
210 | ),
211 | (
212 | "target_countries.id",
213 | "Number",
214 | """
215 | The ID of a target country.
216 |
217 | Ex: 1
218 | """
219 | ),
220 | (
221 | "target_countries.slug",
222 | "String",
223 | """
224 | The URL-friendly identifier of a target country.
225 |
226 | Ex: "us"
227 | """
228 | ),
229 | (
230 | "target_countries.value",
231 | "String",
232 | """
233 | The display name of a target country.
234 |
235 | Ex: "United States"
236 | """
237 | ),
238 | (
239 | "target_industries.id",
240 | "Number",
241 | """
242 | The ID of a target industry.
243 |
244 | Ex: 344
245 | """
246 | ),
247 | (
248 | "target_industries.slug",
249 | "String",
250 | """
251 | The URL-friendly identifier of a target industry.
252 |
253 | Ex: "government"
254 | """
255 | ),
256 | (
257 | "target_industries.value",
258 | "String",
259 | """
260 | The display name of a target industry.
261 |
262 | Ex: "Government"
263 | """
264 | ),
265 | (
266 | "url",
267 | "String",
268 | """
269 | The URL to the adversary's profile page.
270 |
271 | Ex: "https://falcon.crowdstrike.com/intelligence/actors/fancy-bear/"
272 | """
273 | ),
274 | ]
275 |
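# Illustrative note (assumption): generate_md_table() is expected to render the
# header tuple plus the rows above as a Markdown table that is embedded in the
# FQL guide strings below, e.g.:
#
#   | Name | Type | Description |
#   | --- | --- | --- |
#   | id | Number | The adversary's ID. Ex: 2583 |
#
# See falcon_mcp/common/utils.py for the actual implementation.
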
276 | QUERY_ACTOR_ENTITIES_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Intel Query Actor Entities Guide
277 |
278 | === BASIC SYNTAX ===
279 | property_name:[operator]'value'
280 |
281 | === AVAILABLE OPERATORS ===
282 | • No operator = equals (default)
283 | • ! = not equal to
284 | • > = greater than
285 | • >= = greater than or equal
286 | • < = less than
287 | • <= = less than or equal
288 | • ~ = text match (ignores case, spaces, punctuation)
289 | • !~ = does not text match
290 | • * = wildcard matching (one or more characters)
291 |
292 | === DATA TYPES & SYNTAX ===
293 | • Strings: 'value' or ['exact_value'] for exact match
294 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
295 | • Booleans: true or false (no quotes)
296 | • Numbers: 123 (no quotes)
297 | • Wildcards: 'partial*' or '*partial' or '*partial*'
298 |
299 | === COMBINING CONDITIONS ===
300 | • + = AND condition
301 | • , = OR condition
302 | • ( ) = Group expressions
303 |
304 | === falcon_search_actors FQL filter options ===
305 |
306 | """ + generate_md_table(QUERY_ACTOR_ENTITIES_FQL_FILTERS) + """
307 |
308 | === EXAMPLE USAGE ===
309 |
310 | • animal_classifier:'BEAR'
311 | • name:'FANCY BEAR'
312 | • animal_classifier:'BEAR',animal_classifier:'SPIDER'
313 |
314 | === IMPORTANT NOTES ===
315 | • Use single quotes around string values: 'value'
316 | • Use square brackets for exact matches: ['exact_value']
317 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
318 | """
319 |
320 | QUERY_INDICATOR_ENTITIES_FQL_FILTERS = [
321 | (
322 | "Name",
323 | "Type",
324 | "Description"
325 | ),
326 | (
327 | "id",
328 | "String",
329 | """
330 | The indicator ID. It follows the format: {type}_{indicator}
331 | """
332 | ),
333 | (
334 | "created_date",
335 | "Timestamp",
336 | """
337 | Timestamp in standard Unix time, UTC when the indicator was created.
338 |
339 | Ex: 1753022288
340 | """
341 | ),
342 | (
343 | "deleted",
344 | "Boolean",
345 | """
346 | If true, include only published indicators.
347 | If false, include only deleted indicators.
348 |
349 | Ex: false
350 | """
351 | ),
352 | (
353 | "domain_types",
354 | "String",
355 | """
356 | The domain type of domain indicators.
357 |
358 | Possible values include:
359 | - ActorControlled
360 | - DGA
361 | - DynamicDNS
362 | - KnownGood
363 | - LegitimateCompromised
364 | - PhishingDomain
365 | - Sinkholed
366 | - StrategicWebCompromise
367 | - Unregistered
368 | """
369 | ),
370 | (
371 | "indicator",
372 | "String",
373 | """
374 | The indicator that was queried.
375 |
376 | Ex: "all-deutsch.gl.at.ply.gg"
377 | """
378 | ),
379 | (
380 | "ip_address_types",
381 | "String",
382 | """
383 | The address type of ip_address indicators.
384 |
385 | Possible values include:
386 | - HtranDestinationNode
387 | - HtranProxy
388 | - LegitimateCompromised
389 | - Parking
390 | - PopularSite
391 | - SharedWebHost
392 | - Sinkhole
393 | - TorProxy
394 | """
395 | ),
396 | (
397 | "kill_chains",
398 | "String",
399 | """
400 | The point in the kill chain at which an indicator is associated.
401 |
402 | Possible values include:
403 | - reconnaissance
404 | - weaponization
405 | - delivery
406 | - exploitation
407 | - installation
408 | - c2 (Command and Control)
409 | - actionOnObjectives
410 |
411 | Ex: "delivery"
412 | """
413 | ),
414 | (
415 | "last_updated",
416 | "Timestamp",
417 | """
418 | Timestamp in standard Unix time, UTC when the indicator was last updated in the internal database.
419 |
420 | Ex: 1753027269
421 | """
422 | ),
423 | (
424 | "malicious_confidence",
425 | "String",
426 | """
427 | Indicates a confidence level by which an indicator is considered to be malicious.
428 |
429 | Possible values:
430 | - high: If indicator is an IP or domain, it has been associated with malicious activity within the last 60 days.
431 | - medium: If indicator is an IP or domain, it has been associated with malicious activity within the last 60-120 days.
432 | - low: If indicator is an IP or domain, it has been associated with malicious activity exceeding 120 days.
433 | - unverified: This indicator has not been verified by a CrowdStrike Intelligence analyst or an automated system.
434 |
435 | Ex: "high"
436 | """
437 | ),
438 | (
439 | "malware_families",
440 | "String",
441 | """
442 | Indicates the malware family an indicator has been associated with. An indicator might be associated with more than one malware family.
443 |
444 | Ex: "Xworm", "njRATLime"
445 | """
446 | ),
447 | (
448 | "published_date",
449 | "Timestamp",
450 | """
451 | Timestamp in standard Unix time, UTC when the indicator was first published to the API.
452 |
453 | Ex: 1753022288
454 | """
455 | ),
456 | (
457 | "reports",
458 | "String",
459 | """
460 | The report ID that the indicator is associated with (such as CSIT-XXXX or CSIR-XXXX).
461 | The report list is also represented under the labels list in the JSON data structure.
462 | """
463 | ),
464 | (
465 | "targets",
466 | "String",
467 | """
468 | The indicator's targeted industries.
469 |
470 | Possible values include sectors like:
471 | - Aerospace
472 | - Agricultural
473 | - Chemical
474 | - Defense
475 | - Dissident
476 | - Energy
477 | - Financial
478 | - Government
479 | - Healthcare
480 | - Technology
481 | """
482 | ),
483 | (
484 | "threat_types",
485 | "String",
486 | """
487 | Types of threats.
488 |
489 | Ex: "ddos", "mineware", "banking"
490 | """
491 | ),
492 | (
493 | "type",
494 | "String",
495 | """
496 | Possible indicator types include:
497 | - binary_string
498 | - compile_time
499 | - device_name
500 | - domain
501 | - email_address
502 | - email_subject
503 | - event_name
504 | - file_mapping
505 | - file_name
506 | - file_path
507 | - hash_ion
508 | - hash_md5
509 | - hash_sha256
510 | - ip_address
511 | - ip_address_block
512 | - mutex_name
513 | - password
514 | - persona_name
515 | - phone_number
516 | - port
517 | - registry
518 | - semaphore_name
519 | - service_name
520 | - url
521 | - user_agent
522 | - username
523 | - x509_serial
524 | - x509_subject
525 |
526 | Ex: "domain"
527 | """
528 | ),
529 | (
530 | "vulnerabilities",
531 | "String",
532 | """
533 | Associated vulnerabilities (CVEs).
534 |
535 | Ex: "CVE-2023-1234"
536 | """
537 | ),
538 | ]
539 |
540 | QUERY_INDICATOR_ENTITIES_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Intel Query Indicator Entities Guide
541 |
542 | === BASIC SYNTAX ===
543 | property_name:[operator]'value'
544 |
545 | === AVAILABLE OPERATORS ===
546 | • No operator = equals (default)
547 | • ! = not equal to
548 | • > = greater than
549 | • >= = greater than or equal
550 | • < = less than
551 | • <= = less than or equal
552 | • ~ = text match (ignores case, spaces, punctuation)
553 | • !~ = does not text match
554 | • * = wildcard matching (one or more characters)
555 |
556 | === DATA TYPES & SYNTAX ===
557 | • Strings: 'value' or ['exact_value'] for exact match
558 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
559 | • Booleans: true or false (no quotes)
560 | • Numbers: 123 (no quotes)
561 | • Wildcards: 'partial*' or '*partial' or '*partial*'
562 |
563 | === COMBINING CONDITIONS ===
564 | • + = AND condition
565 | • , = OR condition
566 | • ( ) = Group expressions
567 |
568 | === falcon_search_indicators FQL filter options ===
569 |
570 | """ + generate_md_table(QUERY_INDICATOR_ENTITIES_FQL_FILTERS) + """
571 |
572 | === EXAMPLE USAGE ===
573 |
574 | • type:'domain'
575 | • malicious_confidence:'high'
576 | • type:'hash_md5'+malicious_confidence:'high'
577 | • created_date:>'2023-01-01T00:00:00Z'
578 |
579 | === IMPORTANT NOTES ===
580 | • Use single quotes around string values: 'value'
581 | • Use square brackets for exact matches: ['exact_value']
582 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
583 | """
584 |
585 |
586 | QUERY_REPORT_ENTITIES_FQL_FILTERS = [
587 | (
588 | "Name",
589 | "Type",
590 | "Description",
591 | ),
592 | (
593 | "id",
594 | "Number",
595 | """
596 | The report's ID.
597 |
598 | Ex: 2583
599 | """
600 | ),
601 | (
602 | "actors",
603 | "String",
604 | """
605 | Names of adversaries included in a report.
606 |
607 | Ex: "FANCY BEAR"
608 | """
609 | ),
610 | (
611 | "created_date",
612 | "Timestamp",
613 | """
614 | Timestamp in Unix epoch format when the report was created.
615 |
616 | Ex: 1754075803
617 | """
618 | ),
619 | (
620 | "description",
621 | "String",
622 | """
623 | A detailed description of the report.
624 |
625 | Ex: "In mid-July 2025, CrowdStrike Intelligence identified infrastructure..."
626 | """
627 | ),
628 | (
629 | "last_modified_date",
630 | "Timestamp",
631 | """
632 | Timestamp in Unix epoch format when the report was last modified.
633 |
634 | Ex: 1754076191
635 | """
636 | ),
637 | (
638 | "motivations.value",
639 | "String",
640 | """
641 | Motivations included in the report.
642 |
643 | Ex: "Criminal", "State-Sponsored"
644 | """
645 | ),
646 | (
647 | "name",
648 | "String",
649 | """
650 | The report's name.
651 |
652 | Ex: "CSA-250861 Newly Identified HAYWIRE KITTEN Infrastructure Associated with Microsoft Phishing Campaign"
653 | """
654 | ),
655 | (
656 | "type",
657 | "String",
658 | """
659 | The type of report.
660 |
661 | Ex: "notice", "tipper", "periodic-report"
662 | """
663 | ),
664 | (
665 | "short_description",
666 | "String",
667 | """
668 | A truncated version of the report's description.
669 |
670 | Ex: "Adversary: HAYWIRE KITTEN || Target Industry: Technology, Renewable Energy..."
671 | """
672 | ),
673 | (
674 | "slug",
675 | "String",
676 | """
677 | The URL-friendly identifier of the report.
678 |
679 | Ex: "csa-250861", "csit-25151"
680 | """
681 | ),
682 | (
683 | "sub_type",
684 | "String",
685 | """
686 | The subtype of the report.
687 |
688 | Ex: "daily", "yara"
689 | """
690 | ),
691 | (
692 | "tags",
693 | "String",
694 | """
695 | The report's tags.
696 |
697 | Ex: "ransomware", "espionage", "vulnerabilities"
698 | """
699 | ),
700 | (
701 | "target_countries",
702 | "String",
703 | """
704 | Targeted countries included in the report.
705 |
706 | Ex: "United States", "Taiwan", "Western Europe"
707 | """
708 | ),
709 | (
710 | "target_industries",
711 | "String",
712 | """
713 | Targeted industries included in the report.
714 |
715 | Ex: "Technology", "Government", "Healthcare"
716 | """
717 | ),
718 | (
719 | "url",
720 | "String",
721 | """
722 | The URL to the report's page.
723 |
724 | Ex: "https://falcon.crowdstrike.com/intelligence/reports/csa-250861"
725 | """
726 | ),
727 | ]
728 |
729 | QUERY_REPORT_ENTITIES_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Intel Query Report Entities Guide
730 |
731 | === BASIC SYNTAX ===
732 | property_name:[operator]'value'
733 |
734 | === AVAILABLE OPERATORS ===
735 | • No operator = equals (default)
736 | • ! = not equal to
737 | • > = greater than
738 | • >= = greater than or equal
739 | • < = less than
740 | • <= = less than or equal
741 | • ~ = text match (ignores case, spaces, punctuation)
742 | • !~ = does not text match
743 | • * = wildcard matching (one or more characters)
744 |
745 | === DATA TYPES & SYNTAX ===
746 | • Strings: 'value' or ['exact_value'] for exact match
747 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
748 | • Booleans: true or false (no quotes)
749 | • Numbers: 123 (no quotes)
750 | • Wildcards: 'partial*' or '*partial' or '*partial*'
751 |
752 | === COMBINING CONDITIONS ===
753 | • + = AND condition
754 | • , = OR condition
755 | • ( ) = Group expressions
756 |
757 | === falcon_search_reports FQL filter options ===
758 |
759 | """ + generate_md_table(QUERY_REPORT_ENTITIES_FQL_FILTERS) + """
760 |
761 | === EXAMPLE USAGE ===
762 |
763 | • type:'periodic-report'
764 | • name:'*ransomware*'
765 | • created_date:>'2023-01-01T00:00:00Z'
766 | • target_industries:'healthcare'
767 |
768 | === IMPORTANT NOTES ===
769 | • Use single quotes around string values: 'value'
770 | • Use square brackets for exact matches: ['exact_value']
771 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
772 | """
773 |
```
--------------------------------------------------------------------------------
/tests/e2e/modules/test_cloud.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | E2E tests for the Cloud module.
3 | """
4 |
5 | import json
6 | import unittest
7 |
8 | import pytest
9 |
10 | from tests.e2e.utils.base_e2e_test import BaseE2ETest
11 |
12 |
13 | @pytest.mark.e2e
14 | class TestCloudModuleE2E(BaseE2ETest):
15 | """
16 | End-to-end test suite for the Falcon MCP Server Cloud Module.
17 | """
18 |
19 | def test_search_kubernetes_containers_running(self):
20 | """Verify the agent can search for kubernetes containers that are running."""
21 |
22 | async def test_logic():
23 | fixtures = [
24 | {
25 | "operation": "ReadContainerCombined",
26 | "validator": lambda kwargs: "running_status"
27 | in kwargs.get("parameters", {}).get("filter", "").lower(),
28 | "response": {
29 | "status_code": 200,
30 | "body": {
31 | "resources": [
32 | {
33 | "container_id": "container-001",
34 | "agents": [
35 | {
36 | "aid": "558ce490595748d6a67b16969797d655",
37 | "build": "0000",
38 | "type": "Falcon sensor for linux",
39 | },
40 | ],
41 | "cloud_name": "AWS",
42 | "cloud_account_id": "00001",
43 | "cloud_region": "ca-central-1",
44 | "cluster_name": "production",
45 | "first_seen": "2025-05-27T03:04:10Z",
46 | "image_registry": "docker.amazonaws.com",
47 | "image_repository": "myservice",
48 | "image_tag": "v1.0.0",
49 | "image_vulnerability_count": 361,
50 | "last_seen": "2025-07-13T19:53:07Z",
51 | "container_name": "myservice",
52 | "namespace": "default",
53 | "running_status": True,
54 | },
55 | {
56 | "container_id": "container-002",
57 | "agents": [
58 | {
59 | "aid": "523c3113363845d4a6da493a29caa924",
60 | "build": "0000",
61 | "type": "Falcon sensor for linux",
62 | },
63 | ],
64 | "cloud_name": "AWS",
65 | "cloud_account_id": "00001",
66 | "cloud_region": "us-1",
67 | "cluster_name": "production",
68 | "first_seen": "2025-06-27T03:04:10Z",
69 | "image_registry": "docker.amazonaws.com",
70 | "image_repository": "myservice",
71 | "image_tag": "v1.0.0",
72 | "image_vulnerability_count": 361,
73 | "last_seen": "2025-07-13T19:53:07Z",
74 | "container_name": "myservice",
75 | "namespace": "default",
76 | "running_status": True,
77 | },
78 | ],
79 | },
80 | },
81 | },
82 | ]
83 |
84 | self._mock_api_instance.command.side_effect = (
85 | self._create_mock_api_side_effect(fixtures)
86 | )
87 |
88 | prompt = "Find all kubernetes containers that are running"
89 | return await self._run_agent_stream(prompt)
90 |
91 | def assertions(tools, result):
92 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
93 | used_tool = tools[len(tools) - 1]
94 | self.assertEqual(
95 | used_tool["input"]["tool_name"], "falcon_search_kubernetes_containers"
96 | )
97 |
98 | # Check for the filter for running status
99 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
100 | self.assertTrue(
101 | "running_status" in tool_input_str,
102 | f"Expected running status filtering in tool input: {tool_input_str}",
103 | )
104 |
105 | self.assertIn("container-001", used_tool["output"])
106 | self.assertIn("container-002", used_tool["output"])
107 |
108 | # Verify API calls were made correctly
109 | self.assertGreaterEqual(
110 |                 self._mock_api_instance.command.call_count, 1, "Expected at least 1 API call"
111 | )
112 |
113 | # Check API call (ReadContainerCombined)
114 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
115 | "parameters", {}
116 | )
117 | filter_str = api_call_params.get("filter", "").lower()
118 | self.assertTrue(
119 | "running_status" in filter_str,
120 | f"Expected running_status filtering in API call: {filter_str}",
121 | )
122 |
123 | # Verify result contains expected information
124 | self.assertIn("container-001", result)
125 | self.assertIn("container-002", result)
126 |
127 | self.run_test_with_retries(
128 | "test_search_kubernetes_containers_running", test_logic, assertions
129 | )
130 |
131 | def test_search_kubernetes_container_with_vulnerabilities(self):
132 |         """Verify the agent can search for kubernetes containers that have image vulnerabilities and sort them
133 | by image_vulnerability_count in descending order.
134 | """
135 |
136 | async def test_logic():
137 | fixtures = [
138 | {
139 | "operation": "ReadContainerCombined",
140 | "validator": lambda kwargs: "image_vulnerability_count"
141 | in kwargs.get("parameters", {}).get("filter", "").lower(),
142 | "response": {
143 | "status_code": 200,
144 | "body": {
145 | "resources": [
146 | {
147 | "container_id": "container-001",
148 | "agents": [
149 | {
150 | "aid": "558ce490595748d6a67b16969797d655",
151 | "build": "0000",
152 | "type": "Falcon sensor for linux",
153 | },
154 | ],
155 | "cloud_name": "AWS",
156 | "cloud_account_id": "00001",
157 | "cloud_region": "ca-central-1",
158 | "cluster_name": "production",
159 | "first_seen": "2025-05-27T03:04:10Z",
160 | "image_registry": "docker.amazonaws.com",
161 | "image_repository": "myservice",
162 | "image_tag": "v1.0.0",
163 | "image_vulnerability_count": 361,
164 | "last_seen": "2025-07-13T19:53:07Z",
165 | "container_name": "myservice",
166 | "namespace": "default",
167 | "running_status": True,
168 | },
169 | ],
170 | },
171 | },
172 | },
173 | ]
174 |
175 | self._mock_api_instance.command.side_effect = (
176 | self._create_mock_api_side_effect(fixtures)
177 | )
178 |
179 |             prompt = "Find top 1 kubernetes container that is running and has image vulnerabilities."  # fmt: skip
180 | return await self._run_agent_stream(prompt)
181 |
182 | def assertions(tools, result):
183 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
184 | used_tool = tools[len(tools) - 1]
185 |
186 | self.assertEqual(
187 | used_tool["input"]["tool_name"], "falcon_search_kubernetes_containers"
188 | )
189 |
190 | # Check for the filter for image_vulnerability_count
191 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
192 | self.assertTrue(
193 | "image_vulnerability_count" in tool_input_str,
194 | f"Expected image_vulnerability_count filtering in tool input: {tool_input_str}",
195 | )
196 |
197 | self.assertIn("container-001", used_tool["output"])
198 |
199 | # Verify API calls were made correctly
200 | self.assertGreaterEqual(
201 |                 self._mock_api_instance.command.call_count, 1, "Expected at least 1 API call"
202 | )
203 |
204 | # Check API call (ReadContainerCombined)
205 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
206 | "parameters", {}
207 | )
208 |
209 | filter_str = api_call_params.get("filter", "").lower()
210 | self.assertTrue(
211 | "image_vulnerability_count:>0" in filter_str,
212 | f"Expected image_vulnerability_count filtering in API call: {filter_str}",
213 | )
214 |
215 | # Verify result contains expected information
216 | self.assertIn("container-001", result)
217 | self.assertIn("361", result) # vulnerability count
218 |
219 | self.run_test_with_retries(
220 | "test_search_kubernetes_container_with_vulnerabilities",
221 | test_logic,
222 | assertions,
223 | )
224 |
225 | def test_count_kubernetes_containers_by_cloud_name(self):
226 | """Verify the agent can aggregate kubernetes containers by cloud name."""
227 |
228 | async def test_logic():
229 | fixtures = [
230 | {
231 | "operation": "ReadContainerCount",
232 | "validator": lambda kwargs: "cloud_name"
233 | in kwargs.get("parameters", {}).get("filter", "").lower(),
234 | "response": {
235 | "status_code": 200,
236 | "body": {
237 | "resources": [
238 | {
239 | "count": 333,
240 | },
241 | ],
242 | },
243 | },
244 | },
245 | ]
246 |
247 | self._mock_api_instance.command.side_effect = (
248 | self._create_mock_api_side_effect(fixtures)
249 | )
250 |
251 | prompt = "How many kubernetes containers do I have in cloud provider AWS?"
252 | return await self._run_agent_stream(prompt)
253 |
254 | def assertions(tools, result):
255 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
256 | used_tool = tools[len(tools) - 1]
257 | self.assertEqual(
258 | used_tool["input"]["tool_name"], "falcon_count_kubernetes_containers"
259 | )
260 |
261 | # Check for the filter for cloud_name
262 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
263 | self.assertTrue(
264 | "cloud_name" in tool_input_str,
265 | f"Expected cloud_name filtering in tool input: {tool_input_str}",
266 | )
267 |
268 | self.assertIn("333", used_tool["output"])
269 |
270 | # Verify API calls were made correctly
271 | self.assertGreaterEqual(
272 |                 self._mock_api_instance.command.call_count, 1, "Expected at least 1 API call"
273 | )
274 |
275 | # Check API call (ReadContainerCount)
276 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
277 | "parameters", {}
278 | )
279 |
280 | filter_str = api_call_params.get("filter", "").lower()
281 | self.assertTrue(
282 | "cloud_name" in filter_str,
283 | f"Expected cloud_name filtering in API call: {filter_str}",
284 | )
285 |
286 | # Verify result contains expected information
287 | self.assertIn("AWS", result) # cloud name
288 | self.assertIn("333", result) # containers count
289 |
290 | self.run_test_with_retries(
291 | "test_count_kubernetes_containers_by_cloud_name",
292 | test_logic,
293 | assertions,
294 | )
295 |
296 | def test_search_images_vulnerabilities_by_container_id(self):
297 | """Verify the agent can search images vulnerabilities by container ID."""
298 |
299 | async def test_logic():
300 | fixtures = [
301 | {
302 | "operation": "ReadCombinedVulnerabilities",
303 | "validator": lambda kwargs: "container_id"
304 | in kwargs.get("parameters", {}).get("filter", "").lower(),
305 | "response": {
306 | "status_code": 200,
307 | "body": {
308 | "resources": [
309 | {
310 | "cve_id": "CVE-2005-2541",
311 | "severity": "High",
312 | "cvss_score": 10,
313 | "cps_current_rating": "Low",
314 | "description": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.\n",
315 | "exploit_found": False,
316 | "exploited_status": 0,
317 | "exploited_status_string": "Unproven",
318 | "published_date": "2005-08-10T04:00:00Z",
319 | "images_impacted": 284,
320 | "packages_impacted": 7,
321 | "containers_impacted": 46,
322 | "remediation_available": False,
323 | },
324 | ],
325 | },
326 | },
327 | },
328 | ]
329 |
330 | self._mock_api_instance.command.side_effect = (
331 | self._create_mock_api_side_effect(fixtures)
332 | )
333 |
334 | prompt = 'Search images vulnerabilities for the container "container-001"'
335 | return await self._run_agent_stream(prompt)
336 |
337 | def assertions(tools, result):
338 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
339 | used_tool = tools[len(tools) - 1]
340 | self.assertEqual(
341 | used_tool["input"]["tool_name"],
342 | "falcon_search_images_vulnerabilities",
343 | )
344 |
345 | # Check for the filter for container_id
346 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
347 | self.assertTrue(
348 | "container_id" in tool_input_str,
349 | f"Expected container_id filtering in tool input: {tool_input_str}",
350 | )
351 |
352 | # Check for the vulnerability from the API response
353 | self.assertIn("CVE-2005-2541", used_tool["output"])
354 |
355 | # Verify API calls were made correctly
356 | self.assertGreaterEqual(
357 |                 self._mock_api_instance.command.call_count, 1, "Expected at least 1 API call"
358 | )
359 |
360 |             # Check API call (ReadCombinedVulnerabilities)
361 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
362 | "parameters", {}
363 | )
364 |
365 | filter_str = api_call_params.get("filter", "").lower()
366 | self.assertTrue(
367 | "container_id:'container-001'" in filter_str,
368 | f"Expected container_id filtering in API call: {filter_str}",
369 | )
370 |
371 | # Verify result contains expected information
372 | self.assertIn("CVE-2005-2541", result)
373 |
374 | self.run_test_with_retries(
375 | "test_search_images_vulnerabilities_by_container_id",
376 | test_logic,
377 | assertions,
378 | )
379 |
380 |
381 | if __name__ == "__main__":
382 | unittest.main()
383 |
```
--------------------------------------------------------------------------------
/falcon_mcp/resources/discover.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Contains Discover resources for applications and unmanaged assets.
3 | """
4 |
5 | from falcon_mcp.common.utils import generate_md_table
6 |
7 | # List of tuples containing filter options data: (name, type, operators, description)
8 | SEARCH_APPLICATIONS_FQL_FILTERS = [
9 | (
10 | "Name",
11 | "Type",
12 | "Operators",
13 | "Description"
14 | ),
15 | (
16 | "architectures",
17 | "String",
18 | "Yes",
19 | """
20 | Application architecture. Unavailable for browser extensions.
21 |
22 | Ex: architectures:'x86'
23 | Ex: architectures:!'x64'
24 | Ex: architectures:['x86','x64']
25 | """
26 | ),
27 | (
28 | "category",
29 | "String",
30 | "Yes",
31 | """
32 | Category the application is in. Unavailable for browser extensions.
33 |
34 | Ex: category:'IT/Security Apps'
35 | Ex: category:'Web Browsers'
36 | Ex: category:'Back up and Recovery'
37 | Ex: category:['IT/Security Apps','Web Browsers']
38 | """
39 | ),
40 | (
41 | "cid",
42 | "String",
43 | "Yes",
44 | """
45 | The application's customer ID. In multi-CID environments:
46 | - You can filter on both parent and child CIDs.
47 | - If you're in a parent CID and leave this filter empty, the response includes data about the parent CID and all its child CIDs.
48 | - If you're in a parent CID and use this filter, the response includes data for only the CIDs you filtered on.
49 | - If you're in a child CID, this property will only show data for that CID.
50 |
51 | Ex: cid:'cxxx4'
52 | Ex: cid:!'cxxx4'
53 | Ex: cid:'cxxx4',cid:'dxxx5'
54 | """
55 | ),
56 | (
57 | "first_seen_timestamp",
58 | "Timestamp",
59 | "Yes",
60 | """
61 | Date and time the application was first seen.
62 |
63 | Ex: first_seen_timestamp:'2022-12-22T12:41:47.417Z'
64 | """
65 | ),
66 | (
67 | "groups",
68 | "String",
69 | "Yes",
70 | """
71 | All application groups the application is assigned to.
72 |
73 | Ex: groups:'ExampleAppGroup'
74 | Ex: groups:['AppGroup1','AppGroup2']
75 | """
76 | ),
77 | (
78 | "id",
79 | "String",
80 | "Yes",
81 | """
82 | Unique ID of the application. Each application ID represents a particular instance of an application on a particular asset.
83 |
84 | Ex: id:'a89xxxxx191'
85 | Ex: id:'a89xxxxx191',id:'a89xxxxx192'
86 | """
87 | ),
88 | (
89 | "installation_paths",
90 | "String",
91 | "Yes",
92 | """
93 | File paths of the application or executable file to the folder on the asset.
94 |
95 | Ex: installation_paths:'C:\\Program Files\\Internet Explorer\\iexplore.exe'
96 | Ex: installation_paths:'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'
97 | Ex: installation_paths:['C:\\Program Files (x86)\\Google*','C:\\Program Files (x86)\\Adobe*']
98 | """
99 | ),
100 | (
101 | "installation_timestamp",
102 | "Timestamp",
103 | "Yes",
104 | """
105 | Date and time the application was installed, if available.
106 |
107 | Ex: installation_timestamp:'2023-01-11T00:00:00.000Z'
108 | """
109 | ),
110 | (
111 | "is_normalized",
112 | "Boolean",
113 | "Yes",
114 | """
115 | Windows: Whether the application name is normalized (true/false).
116 | Applications can have different naming variations that result in different records for each variation.
117 | To avoid this duplication, the most common applications are listed under a single normalized application name.
118 | Unavailable for browser extensions.
119 |
120 | Ex: is_normalized:true
121 | """
122 | ),
123 | (
124 | "is_suspicious",
125 | "Boolean",
126 | "Yes",
127 | """
128 | Whether the application is suspicious based on how often it's been seen in a detection on that asset (true/false).
129 | Unavailable for browser extensions. See browser_extension.permission_severity instead.
130 |
131 | Ex: is_suspicious:true
132 | Ex: is_suspicious:!false
133 | """
134 | ),
135 | (
136 | "last_updated_timestamp",
137 | "Timestamp",
138 | "Yes",
139 | """
140 | Date and time the installation fields of the application instance most recently changed.
141 |
142 | Ex: last_updated_timestamp:'2022-12-22T12:41:47.417Z'
143 | """
144 | ),
145 | (
146 | "last_used_file_hash",
147 | "String",
148 | "Yes",
149 | """
150 | Windows and macOS: Most recent file hash used for the application.
151 |
152 | Ex: last_used_file_hash:'0xxxa'
153 | Ex: last_used_file_hash:['0xxxa','7xxxx9']
154 | """
155 | ),
156 | (
157 | "last_used_file_name",
158 | "String",
159 | "Yes",
160 | """
161 | Windows and macOS: Most recent file name used for the application.
162 |
163 | Ex: last_used_file_name:'setup.exe'
164 | Ex: last_used_file_name:'putty.exe'
165 | Ex: last_used_file_name:['setup.exe','putty.exe']
166 | """
167 | ),
168 | (
169 | "last_used_timestamp",
170 | "Timestamp",
171 | "Yes",
172 | """
173 | Windows and macOS: Date and time the application was most recently used.
174 |
175 | Ex: last_used_timestamp:'2023-01-10T23:00:00.000Z'
176 | """
177 | ),
178 | (
179 | "last_used_user_name",
180 | "String",
181 | "Yes",
182 | """
183 | Windows and macOS: Username of the account that most recently used the application.
184 |
185 | Ex: last_used_user_name:'Administrator'
186 | Ex: last_used_user_name:'xiany'
187 | Ex: last_used_user_name:['xiany','dursti']
188 | """
189 | ),
190 | (
191 | "last_used_user_sid",
192 | "String",
193 | "Yes",
194 | """
195 | Windows and macOS: Security identifier of the account that most recently used the application.
196 |
197 | Ex: last_used_user_sid:'S-1-x-x-xxxxxxxxxx-xxxxxxxxxx-xxxxxxxxxx-xxx1'
198 | Ex: last_used_user_sid:['S-x-x-x-x-1','S-x-x-x-7']
199 | """
200 | ),
201 | (
202 | "name",
203 | "String",
204 | "Yes",
205 | """
206 | Name of the application.
207 |
208 | Ex: name:'Chrome'
209 | Ex: name:'Falcon Sensor'
210 | Ex: name:['Chrome','Edge']
211 | """
212 | ),
213 | (
214 | "name_vendor",
215 | "String",
216 | "Yes",
217 | """
218 | To group results by application: The app name and vendor name for all application IDs with this application name.
219 |
220 | Ex: name_vendor:'Chrome-Google'
221 | Ex: name_vendor:'Tools-VMware'
222 | Ex: name_vendor:['Chrome-Google','Tools-VMware']
223 | """
224 | ),
225 | (
226 | "name_vendor_version",
227 | "String",
228 | "Yes",
229 | """
230 | To group results by application version: The app name, vendor name, and vendor version for all application IDs with this application name.
231 |
232 | Ex: name_vendor_version:'Chrome-Google-108.0.5359.99'
233 | Ex: name_vendor_version:'Flash Player-Adobe-32.0.0.387'
234 | Ex: name_vendor_version:['Chrome-Google-108*','Flash Player-Adobe-32*']
235 | """
236 | ),
237 | (
238 | "software_type",
239 | "String",
240 | "Yes",
241 | """
242 | The type of software: 'application' or 'browser_extension'.
243 |
244 | Ex: software_type:'application'
245 | """
246 | ),
247 | (
248 | "vendor",
249 | "String",
250 | "Yes",
251 | """
252 | Name of the application vendor.
253 |
254 | Ex: vendor:'Microsoft Corporation'
255 | Ex: vendor:'Google'
256 | Ex: vendor:'CrowdStrike'
257 | Ex: vendor:['Microsoft*','Google']
258 | """
259 | ),
260 | (
261 | "version",
262 | "String",
263 | "Yes",
264 | """
265 | Application version.
266 |
267 | Ex: version:'4.8.4320.0'
268 | Ex: version:'108.0.5359.99'
269 | Ex: version:'6.50.16403.0'
270 | Ex: version:['6.50.16403.0','6.50.16403.1']
271 | """
272 | ),
273 | (
274 | "versioning_scheme",
275 | "String",
276 | "Yes",
277 | """
278 | Versioning scheme of the application. Unavailable for browser extensions.
279 |
280 | Ex: versioning_scheme:'semver'
281 | Ex: versioning_scheme:['semver','calver']
282 | """
283 | ),
284 | ]
285 |
286 | SEARCH_APPLICATIONS_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Search Applications Guide
287 |
288 | === BASIC SYNTAX ===
289 | property_name:[operator]'value'
290 |
291 | === AVAILABLE OPERATORS ===
292 | • No operator = equals (default)
293 | • ! = not equal to
294 | • > = greater than
295 | • >= = greater than or equal
296 | • < = less than
297 | • <= = less than or equal
298 | • ~ = text match (ignores case, spaces, punctuation)
299 | • !~ = does not text match
300 |
301 | === DATA TYPES & SYNTAX ===
302 | • Strings: 'value' or ['exact_value'] for exact match
303 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
304 | • Booleans: true or false (no quotes)
305 | • Numbers: 123 (no quotes)
306 |
307 | === COMBINING CONDITIONS ===
308 | • + = AND condition
309 | • , = OR condition
310 | • ( ) = Group expressions
311 |
312 | === falcon_search_applications FQL filter options ===
313 |
314 | """ + generate_md_table(SEARCH_APPLICATIONS_FQL_FILTERS) + """
315 |
316 | === IMPORTANT NOTES ===
317 | • Use single quotes around string values: 'value'
318 | • Use square brackets for exact matches and multiple values: ['value1','value2']
319 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
320 | • Boolean values: true or false (no quotes)
321 | • Some fields require specific capitalization (check individual field descriptions)
322 |
323 | === COMMON FILTER EXAMPLES ===
324 | • Find Chrome applications: name:'Chrome'
325 | • Find applications from Microsoft: vendor:'Microsoft Corporation'
326 | • Find recently installed applications: installation_timestamp:>'2024-01-01'
327 | • Find suspicious applications: is_suspicious:true
328 | • Find browser extensions: software_type:'browser_extension'
329 | • Find applications used by a specific user: last_used_user_name:'Administrator'
330 | """
331 |
332 | # List of tuples containing filter options for unmanaged assets
333 | SEARCH_UNMANAGED_ASSETS_FQL_FILTERS = [
334 | (
335 | "Name",
336 | "Type",
337 | "Operators",
338 | "Description"
339 | ),
340 | (
341 | "platform_name",
342 | "String",
343 | "Yes",
344 | """
345 | Operating system platform of the unmanaged asset.
346 |
347 | Ex: platform_name:'Windows'
348 | Ex: platform_name:'Linux'
349 | Ex: platform_name:'Mac'
350 | Ex: platform_name:['Windows','Linux']
351 | """
352 | ),
353 | (
354 | "os_version",
355 | "String",
356 | "Yes",
357 | """
358 | Operating system version of the unmanaged asset.
359 |
360 | Ex: os_version:'Windows 10'
361 | Ex: os_version:'Ubuntu 20.04'
362 | Ex: os_version:'macOS 12.3'
363 | Ex: os_version:*'Windows*'
364 | """
365 | ),
366 | (
367 | "hostname",
368 | "String",
369 | "Yes",
370 | """
371 | Hostname of the unmanaged asset.
372 |
373 | Ex: hostname:'PC-001'
374 | Ex: hostname:*'PC-*'
375 | Ex: hostname:['PC-001','PC-002']
376 | """
377 | ),
378 | (
379 | "country",
380 | "String",
381 | "Yes",
382 | """
383 | Country where the unmanaged asset is located.
384 |
385 | Ex: country:'United States of America'
386 | Ex: country:'Germany'
387 | Ex: country:['United States of America','Canada']
388 | """
389 | ),
390 | (
391 | "city",
392 | "String",
393 | "Yes",
394 | """
395 | City where the unmanaged asset is located.
396 |
397 | Ex: city:'New York'
398 | Ex: city:'London'
399 | Ex: city:['New York','Los Angeles']
400 | """
401 | ),
402 | (
403 | "product_type_desc",
404 | "String",
405 | "Yes",
406 | """
407 | Product type description of the unmanaged asset.
408 |
409 | Ex: product_type_desc:'Workstation'
410 | Ex: product_type_desc:'Server'
411 | Ex: product_type_desc:'Domain Controller'
412 | Ex: product_type_desc:['Workstation','Server']
413 | """
414 | ),
415 | (
416 | "external_ip",
417 | "String",
418 | "Yes",
419 | """
420 | External IP address of the unmanaged asset.
421 |
422 | Ex: external_ip:'192.0.2.1'
423 | Ex: external_ip:'192.0.2.0/24'
424 | Ex: external_ip:['192.0.2.1','203.0.113.1']
425 | """
426 | ),
427 | (
428 | "local_ip_addresses",
429 | "String",
430 | "Yes",
431 | """
432 | Local IP addresses of the unmanaged asset.
433 |
434 | Ex: local_ip_addresses:'10.0.1.100'
435 | Ex: local_ip_addresses:'192.168.1.0/24'
436 | Ex: local_ip_addresses:['10.0.1.100','192.168.1.50']
437 | """
438 | ),
439 | (
440 | "mac_addresses",
441 | "String",
442 | "Yes",
443 | """
444 | MAC addresses of the unmanaged asset.
445 |
446 | Ex: mac_addresses:'AA-BB-CC-DD-EE-FF'
447 | Ex: mac_addresses:*'AA-BB-CC*'
448 | Ex: mac_addresses:['AA-BB-CC-DD-EE-FF','11-22-33-44-55-66']
449 | """
450 | ),
451 | (
452 | "first_seen_timestamp",
453 | "Timestamp",
454 | "Yes",
455 | """
456 | Date and time when the unmanaged asset was first discovered.
457 |
458 | Ex: first_seen_timestamp:'2024-01-01T00:00:00Z'
459 | Ex: first_seen_timestamp:>'2024-01-01T00:00:00Z'
460 | Ex: first_seen_timestamp:>'now-7d'
461 | """
462 | ),
463 | (
464 | "last_seen_timestamp",
465 | "Timestamp",
466 | "Yes",
467 | """
468 | Date and time when the unmanaged asset was last seen.
469 |
470 | Ex: last_seen_timestamp:'2024-06-15T12:00:00Z'
471 | Ex: last_seen_timestamp:>'now-24h'
472 | Ex: last_seen_timestamp:<'now-30d'
473 | """
474 | ),
475 | (
476 | "kernel_version",
477 | "String",
478 | "Yes",
479 | """
480 | Kernel version of the unmanaged asset.
481 | Linux and Mac: The major version, minor version, and patch version.
482 | Windows: The build number.
483 |
484 | Ex: kernel_version:'5.4.0'
485 | Ex: kernel_version:'19041'
486 | Ex: kernel_version:*'5.4*'
487 | """
488 | ),
489 | (
490 | "system_manufacturer",
491 | "String",
492 | "Yes",
493 | """
494 | System manufacturer of the unmanaged asset.
495 |
496 | Ex: system_manufacturer:'Dell Inc.'
497 | Ex: system_manufacturer:'VMware, Inc.'
498 | Ex: system_manufacturer:*'Dell*'
499 | """
500 | ),
501 | (
502 | "system_product_name",
503 | "String",
504 | "Yes",
505 | """
506 | System product name of the unmanaged asset.
507 |
508 | Ex: system_product_name:'OptiPlex 7090'
509 | Ex: system_product_name:'VMware Virtual Platform'
510 | Ex: system_product_name:*'OptiPlex*'
511 | """
512 | ),
513 | (
514 | "criticality",
515 | "String",
516 | "Yes",
517 | """
518 | Criticality level assigned to the unmanaged asset.
519 |
520 | Ex: criticality:'Critical'
521 | Ex: criticality:'High'
522 | Ex: criticality:'Medium'
523 | Ex: criticality:'Low'
524 | Ex: criticality:'Unassigned'
525 | """
526 | ),
527 | (
528 | "internet_exposure",
529 | "String",
530 | "Yes",
531 | """
532 | Whether the unmanaged asset is exposed to the internet.
533 |
534 | Ex: internet_exposure:'Yes'
535 | Ex: internet_exposure:'No'
536 | Ex: internet_exposure:'Pending'
537 | Ex: internet_exposure:['Yes','Pending']
538 | """
539 | ),
540 | (
541 | "discovering_by",
542 | "String",
543 | "Yes",
544 | """
545 | Method by which the unmanaged asset was discovered.
546 |
547 | Ex: discovering_by:'Passive'
548 | Ex: discovering_by:'Active'
549 | Ex: discovering_by:['Passive','Active']
550 | """
551 | ),
552 | (
553 | "confidence",
554 | "Number",
555 | "Yes",
556 | """
557 | Confidence level of the unmanaged asset discovery (0-100).
558 | Higher values indicate higher confidence that the asset is real.
559 |
560 | Ex: confidence:>80
561 | Ex: confidence:>=90
562 | Ex: confidence:<50
563 | Ex: confidence:[80,90,95]
564 | """
565 | ),
566 | ]
567 |
568 | SEARCH_UNMANAGED_ASSETS_FQL_DOCUMENTATION = """Falcon Query Language (FQL) - Search Unmanaged Assets Guide
569 |
570 | === BASIC SYNTAX ===
571 | property_name:[operator]'value'
572 |
573 | === AVAILABLE OPERATORS ===
574 | • No operator = equals (default)
575 | • ! = not equal to
576 | • > = greater than
577 | • >= = greater than or equal
578 | • < = less than
579 | • <= = less than or equal
580 | • ~ = text match (ignores case, spaces, punctuation)
581 | • !~ = does not text match
582 |
583 | === DATA TYPES & SYNTAX ===
584 | • Strings: 'value' or ['exact_value'] for exact match
585 | • Dates: 'YYYY-MM-DDTHH:MM:SSZ' (UTC format)
586 | • Booleans: true or false (no quotes)
587 | • Numbers: 123 (no quotes)
588 |
589 | === COMBINING CONDITIONS ===
590 | • + = AND condition
591 | • , = OR condition
592 | • ( ) = Group expressions
593 |
594 | === AUTOMATIC FILTERING ===
595 | This tool automatically filters for unmanaged assets only by adding entity_type:'unmanaged' to all queries.
596 | You do not need to (and cannot) specify entity_type in your filter - it is always set to 'unmanaged'.
597 |
598 | === falcon_search_unmanaged_assets FQL filter options ===
599 |
600 | """ + generate_md_table(SEARCH_UNMANAGED_ASSETS_FQL_FILTERS) + """
601 |
602 | === IMPORTANT NOTES ===
603 | • entity_type:'unmanaged' is automatically applied - do not include in your filter
604 | • Use single quotes around string values: 'value'
605 | • Use square brackets for exact matches and multiple values: ['value1','value2']
606 | • Date format must be UTC: 'YYYY-MM-DDTHH:MM:SSZ'
607 | • Boolean values: true or false (no quotes)
608 | • Some fields require specific capitalization (check individual field descriptions)
609 |
610 | === COMMON FILTER EXAMPLES ===
611 | • Find Windows unmanaged assets: platform_name:'Windows'
612 | • Find high-confidence unmanaged assets: confidence:>80
613 | • Find recently discovered assets: first_seen_timestamp:>'now-7d'
614 | • Find assets by hostname pattern: hostname:*'PC-*'
615 | • Find critical unmanaged assets: criticality:'Critical'
616 | • Find servers: product_type_desc:'Server'
617 | • Find internet-exposed assets: internet_exposure:'Yes'
618 | • Find assets in specific network: external_ip:'192.0.2.0/24'
619 | • Find assets by manufacturer: system_manufacturer:*'Dell*'
620 | • Find recently seen assets: last_seen_timestamp:>'now-24h'
621 |
622 | === COMPLEX QUERY EXAMPLES ===
623 | • Windows workstations seen recently: platform_name:'Windows'+product_type_desc:'Workstation'+last_seen_timestamp:>'now-7d'
624 | • Critical servers with internet exposure: criticality:'Critical'+product_type_desc:'Server'+internet_exposure:'Yes'
625 | • Dell systems discovered this month: system_manufacturer:*'Dell*'+first_seen_timestamp:>'now-30d'
626 | """
627 |
```
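The guide above is exposed to the model as documentation, but the same syntax applies when composing filters programmatically. The sketch below is illustrative only and is not part of the repository: `build_unmanaged_asset_filter` is a hypothetical helper that joins conditions with `+` (AND) exactly as the guide describes, and it deliberately omits `entity_type:'unmanaged'` because the `falcon_search_unmanaged_assets` tool is documented to add that clause to every query itself.

```python
# Hypothetical helper (not part of falcon_mcp): compose an FQL filter string
# following the syntax described in SEARCH_UNMANAGED_ASSETS_FQL_DOCUMENTATION.
# entity_type:'unmanaged' is intentionally left out because the tool adds it.

def build_unmanaged_asset_filter(platform: str, min_confidence: int, days: int) -> str:
    """Join conditions with '+' (AND), quoting string values in single quotes."""
    return (
        f"platform_name:'{platform}'"
        f"+confidence:>={min_confidence}"
        f"+last_seen_timestamp:>'now-{days}d'"
    )


if __name__ == "__main__":
    # -> platform_name:'Windows'+confidence:>=80+last_seen_timestamp:>'now-7d'
    print(build_unmanaged_asset_filter("Windows", 80, 7))
```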
--------------------------------------------------------------------------------
/tests/e2e/modules/test_hosts.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | E2E tests for the Hosts module.
3 | """
4 |
5 | import json
6 | import unittest
7 |
8 | import pytest
9 |
10 | from tests.e2e.utils.base_e2e_test import BaseE2ETest
11 |
12 |
13 | @pytest.mark.e2e
14 | class TestHostsModuleE2E(BaseE2ETest):
15 | """
16 | End-to-end test suite for the Falcon MCP Server Hosts Module.
17 | """
18 |
19 | def test_search_linux_servers(self):
20 | """Verify the agent can search for Linux servers and retrieve their details."""
21 |
22 | async def test_logic():
23 | fixtures = [
24 | {
25 | "operation": "QueryDevicesByFilter",
26 | "validator": lambda kwargs: "linux"
27 | in kwargs.get("parameters", {}).get("filter", "").lower()
28 | and "server"
29 | in kwargs.get("parameters", {}).get("filter", "").lower(),
30 | "response": {
31 | "status_code": 200,
32 | "body": {"resources": ["host-001", "host-002", "host-003"]},
33 | },
34 | },
35 | {
36 | "operation": "PostDeviceDetailsV2",
37 | "validator": lambda kwargs: "host-001"
38 | in kwargs.get("body", {}).get("ids", []),
39 | "response": {
40 | "status_code": 200,
41 | "body": {
42 | "resources": [
43 | {
44 | "device_id": "host-001",
45 | "hostname": "linux-server-01",
46 | "platform_name": "Linux",
47 | "product_type_desc": "Server",
48 | "os_version": "Ubuntu 20.04.3 LTS",
49 | "agent_version": "7.26.17905.0",
50 | "status": "normal",
51 | "last_seen": "2024-01-20T10:00:00Z",
52 | "first_seen": "2024-01-15T08:30:00Z",
53 | "external_ip": "203.0.113.10",
54 | "local_ip": "192.168.1.10",
55 | "machine_domain": "company.local",
56 | "system_manufacturer": "Dell Inc.",
57 | "system_product_name": "PowerEdge R740",
58 | },
59 | {
60 | "device_id": "host-002",
61 | "hostname": "linux-server-02",
62 | "platform_name": "Linux",
63 | "product_type_desc": "Server",
64 | "os_version": "CentOS Linux 8.4",
65 | "agent_version": "7.26.17905.0",
66 | "status": "normal",
67 | "last_seen": "2024-01-20T09:45:00Z",
68 | "first_seen": "2024-01-10T14:20:00Z",
69 | "external_ip": "203.0.113.11",
70 | "local_ip": "192.168.1.11",
71 | "machine_domain": "company.local",
72 | "system_manufacturer": "HPE",
73 | "system_product_name": "ProLiant DL380",
74 | },
75 | {
76 | "device_id": "host-003",
77 | "hostname": "linux-server-03",
78 | "platform_name": "Linux",
79 | "product_type_desc": "Server",
80 | "os_version": "Red Hat Enterprise Linux 8.5",
81 | "agent_version": "7.25.16803.0",
82 | "status": "normal",
83 | "last_seen": "2024-01-20T09:30:00Z",
84 | "first_seen": "2024-01-12T11:15:00Z",
85 | "external_ip": "203.0.113.12",
86 | "local_ip": "192.168.1.12",
87 | "machine_domain": "company.local",
88 | "system_manufacturer": "Lenovo",
89 | "system_product_name": "ThinkSystem SR650",
90 | },
91 | ]
92 | },
93 | },
94 | },
95 | ]
96 |
97 | self._mock_api_instance.command.side_effect = (
98 | self._create_mock_api_side_effect(fixtures)
99 | )
100 |
101 | prompt = "Find all Linux servers in our environment and show me their hostnames, IP addresses, and agent versions"
102 | return await self._run_agent_stream(prompt)
103 |
104 | def assertions(tools, result):
105 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
106 | used_tool = tools[len(tools) - 1]
107 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_hosts")
108 |
109 | # Check for Linux and server filtering
110 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
111 | self.assertTrue(
112 | "linux" in tool_input_str and "server" in tool_input_str,
113 | f"Expected Linux server filtering in tool input: {tool_input_str}",
114 | )
115 |
116 | # Verify all three hosts are in the output
117 | self.assertIn("linux-server-01", used_tool["output"])
118 | self.assertIn("linux-server-02", used_tool["output"])
119 | self.assertIn("linux-server-03", used_tool["output"])
120 |
121 | # Verify API calls were made correctly
122 | self.assertGreaterEqual(
123 | self._mock_api_instance.command.call_count, 2, "Expected at least 2 API calls"
124 | )
125 |
126 | # Check first API call (QueryDevicesByFilter)
127 | api_call_1_params = self._mock_api_instance.command.call_args_list[0][
128 | 1
129 | ].get("parameters", {})
130 | filter_str = api_call_1_params.get("filter", "").lower()
131 | self.assertTrue(
132 | "linux" in filter_str and "server" in filter_str,
133 | f"Expected Linux server filtering in API call: {filter_str}",
134 | )
135 |
136 | # Check second API call (PostDeviceDetailsV2)
137 | api_call_2_body = self._mock_api_instance.command.call_args_list[1][1].get(
138 | "body", {}
139 | )
140 | expected_ids = ["host-001", "host-002", "host-003"]
141 | self.assertEqual(api_call_2_body.get("ids"), expected_ids)
142 |
143 | # Verify result contains expected information
144 | self.assertIn("linux-server-01", result)
145 | self.assertIn("linux-server-02", result)
146 | self.assertIn("linux-server-03", result)
147 | self.assertIn("192.168.1.", result) # Should contain IP addresses
148 | self.assertIn("7.26.", result) # Should contain agent versions
149 |
150 | self.run_test_with_retries("test_search_linux_servers", test_logic, assertions)
151 |
152 | def test_get_specific_host_details(self):
153 | """Verify the agent can get details for specific host IDs."""
154 |
155 | async def test_logic():
156 | fixtures = [
157 | {
158 | "operation": "PostDeviceDetailsV2",
159 | "validator": lambda kwargs: "host-windows-001"
160 | in kwargs.get("body", {}).get("ids", []),
161 | "response": {
162 | "status_code": 200,
163 | "body": {
164 | "resources": [
165 | {
166 | "device_id": "host-windows-001",
167 | "hostname": "DESKTOP-WIN10-01",
168 | "platform_name": "Windows",
169 | "product_type_desc": "Workstation",
170 | "os_version": "Windows 10 Enterprise",
171 | "major_version": "10",
172 | "minor_version": "0",
173 | "agent_version": "7.26.17905.0",
174 | "status": "normal",
175 | "last_seen": "2024-01-20T11:15:00Z",
176 | "first_seen": "2024-01-18T09:00:00Z",
177 | "external_ip": "203.0.113.20",
178 | "local_ip": "192.168.1.20",
179 | "mac_address": "00:50:56:C0:00:08",
180 | "machine_domain": "CORPORATE",
181 | "system_manufacturer": "VMware, Inc.",
182 | "system_product_name": "VMware Virtual Platform",
183 | "bios_manufacturer": "Phoenix Technologies LTD",
184 | "bios_version": "6.00",
185 | "serial_number": "VMware-56-4d-xx-xx-xx-xx",
186 | "reduced_functionality_mode": "no",
187 | "filesystem_containment_status": "normal",
188 | }
189 | ]
190 | },
191 | },
192 | }
193 | ]
194 |
195 | self._mock_api_instance.command.side_effect = (
196 | self._create_mock_api_side_effect(fixtures)
197 | )
198 |
199 | prompt = "Get detailed information for host ID 'host-windows-001', including its hostname, platform, and containment status"
200 | return await self._run_agent_stream(prompt)
201 |
202 | def assertions(tools, result):
203 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
204 | used_tool = tools[len(tools) - 1]
205 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_get_host_details")
206 |
207 | # Check that the specific host ID was used
208 | tool_input = used_tool["input"]["tool_input"]
209 | self.assertIn("host-windows-001", json.dumps(tool_input))
210 |
211 | # Verify host details are in the output
212 | self.assertIn("DESKTOP-WIN10-01", used_tool["output"])
213 | self.assertIn("Windows", used_tool["output"])
214 | self.assertIn("host-windows-001", used_tool["output"])
215 |
216 | # Verify API call was made correctly
217 | self.assertGreaterEqual(
218 | self._mock_api_instance.command.call_count, 1, "Expected 1 API call"
219 | )
220 |
221 | # Check API call (PostDeviceDetailsV2)
222 | api_call_body = self._mock_api_instance.command.call_args_list[0][1].get(
223 | "body", {}
224 | )
225 | self.assertIn("host-windows-001", api_call_body.get("ids", []))
226 |
227 | # Verify result contains expected information
228 | self.assertIn("DESKTOP-WIN10-01", result)
229 | self.assertIn("Windows", result)
230 | self.assertIn("normal", result) # Status and containment status
231 | self.assertIn("192.168.1.20", result) # Local IP
232 |
233 | self.run_test_with_retries(
234 | "test_get_specific_host_details", test_logic, assertions
235 | )
236 |
237 | def test_search_azure_cloud_hosts(self):
238 | """Verify the agent can search for cloud hosts with complex filtering."""
239 |
240 | async def test_logic():
241 | fixtures = [
242 | {
243 | "operation": "QueryDevicesByFilter",
244 | "validator": lambda kwargs: "azure"
245 | in kwargs.get("parameters", {}).get("filter", "").lower(),
246 | "response": {
247 | "status_code": 200,
248 | "body": {"resources": ["azure-host-001"]},
249 | },
250 | },
251 | {
252 | "operation": "PostDeviceDetailsV2",
253 | "validator": lambda kwargs: "azure-host-001"
254 | in kwargs.get("body", {}).get("ids", []),
255 | "response": {
256 | "status_code": 200,
257 | "body": {
258 | "resources": [
259 | {
260 | "device_id": "azure-host-001",
261 | "hostname": "azure-vm-debian",
262 | "platform_name": "Linux",
263 | "product_type_desc": "Server",
264 | "os_version": "Debian GNU 12",
265 | "kernel_version": "6.11.0-1015-azure",
266 | "agent_version": "7.26.17905.0",
267 | "status": "normal",
268 | "last_seen": "2024-01-20T12:00:00Z",
269 | "first_seen": "2024-01-19T10:30:00Z",
270 | "external_ip": "20.45.123.45",
271 | "connection_ip": "172.18.0.2",
272 | "default_gateway_ip": "172.18.0.1",
273 | "service_provider": "AZURE",
274 | "service_provider_account_id": "99841e6a-b123-4567-8901-123456789abc",
275 | "instance_id": "f9d3cef9-0123-4567-8901-123456789def",
276 | "system_manufacturer": "Microsoft Corporation",
277 | "system_product_name": "Virtual Machine",
278 | "deployment_type": "DaemonSet",
279 | "linux_sensor_mode": "User Mode",
280 | "reduced_functionality_mode": "yes",
281 | "k8s_cluster_id": "ecbb9795-9123-4567-8901-123456789ghi",
282 | "tags": ["SensorGroupingTags/daemonset"],
283 | }
284 | ]
285 | },
286 | },
287 | },
288 | ]
289 |
290 | self._mock_api_instance.command.side_effect = (
291 | self._create_mock_api_side_effect(fixtures)
292 | )
293 |
294 | prompt = "Find Azure cloud hosts and show their deployment details including Kubernetes cluster information"
295 | return await self._run_agent_stream(prompt)
296 |
297 | def assertions(tools, result):
298 | self.assertGreaterEqual(len(tools), 1, "Expected at least 1 tool call")
299 |
300 | # Find a search hosts tool call (may not be the last one)
301 | search_tool = None
302 | for tool in tools:
303 | if tool["input"]["tool_name"] == "falcon_search_hosts":
304 | search_tool = tool
305 | break
306 |
307 | self.assertIsNotNone(
308 | search_tool, "Expected at least one falcon_search_hosts tool call"
309 | )
310 |
311 | # Check for Azure filtering in any tool call
312 | found_azure_filtering = False
313 | for tool in tools:
314 | tool_input_str = json.dumps(tool["input"]["tool_input"]).lower()
315 | if "azure" in tool_input_str:
316 | found_azure_filtering = True
317 | break
318 |
319 | self.assertTrue(
320 | found_azure_filtering, "Expected Azure filtering in tool inputs"
321 | )
322 |
323 | # Verify Azure host is in the search tool output
324 | self.assertIn("azure-vm-debian", search_tool["output"])
325 | self.assertIn("AZURE", search_tool["output"])
326 |
327 | # Verify API calls were made correctly
328 | self.assertGreaterEqual(
329 | self._mock_api_instance.command.call_count, 2, "Expected 2 API calls"
330 | )
331 |
332 | # Check that we have QueryDevicesByFilter call with Azure filtering
333 | found_azure_query = False
334 | found_details_call = False
335 |
336 | for call in self._mock_api_instance.command.call_args_list:
337 | if call[0][0] == "QueryDevicesByFilter":
338 | filter_str = call[1].get("parameters", {}).get("filter", "").lower()
339 | if "azure" in filter_str:
340 | found_azure_query = True
341 | elif call[0][0] == "PostDeviceDetailsV2":
342 | if "azure-host-001" in call[1].get("body", {}).get("ids", []):
343 | found_details_call = True
344 |
345 | self.assertTrue(
346 | found_azure_query,
347 | "Expected QueryDevicesByFilter call with Azure filtering",
348 | )
349 | self.assertTrue(
350 | found_details_call,
351 | "Expected PostDeviceDetailsV2 call with azure-host-001",
352 | )
353 |
354 | # Verify result contains expected Azure and Kubernetes information (more flexible matching)
355 | result_lower = result.lower()
356 | self.assertIn("azure-vm-debian", result_lower)
357 | self.assertIn("azure", result_lower)
358 | self.assertIn("daemonset", result_lower)
359 | # Check for Kubernetes info (could be "k8s" or "kubernetes")
360 | self.assertTrue(
361 | "k8s" in result_lower or "kubernetes" in result_lower,
362 | f"Expected Kubernetes cluster info in result: {result_lower[:500]}...",
363 | )
364 |
365 | self.run_test_with_retries(
366 | "test_search_azure_cloud_hosts", test_logic, assertions
367 | )
368 |
369 |
370 | if __name__ == "__main__":
371 | unittest.main()
372 |
```
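Each test above registers fixtures pairing an API operation name with a validator over the mocked FalconPy call's kwargs and a canned response, then wires them into `self._mock_api_instance.command.side_effect` via `_create_mock_api_side_effect`. That helper lives in `tests/e2e/utils/base_e2e_test.py` and is not reproduced on this page, so the following is only a minimal sketch of how such a fixture-driven dispatcher could behave; the real implementation may differ.

```python
# Minimal sketch, assuming command(operation, **kwargs) is the mocked call
# shape (as the call_args_list assertions above imply). This is NOT the
# actual BaseE2ETest._create_mock_api_side_effect implementation.

def create_mock_api_side_effect(fixtures):
    def side_effect(operation, **kwargs):
        for fixture in fixtures:
            if fixture["operation"] == operation and fixture["validator"](kwargs):
                return fixture["response"]
        # Fall back to an empty, well-formed response when nothing matches.
        return {"status_code": 200, "body": {"resources": []}}

    return side_effect
```

With that shape, `command("QueryDevicesByFilter", parameters={...})` only returns the canned host IDs when the validator accepts the FQL filter, which is what the filter-content assertions in these tests depend on.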
--------------------------------------------------------------------------------
/tests/e2e/modules/test_discover.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | E2E tests for the Discover module.
3 | """
4 |
5 | import json
6 | import unittest
7 |
8 | import pytest
9 |
10 | from tests.e2e.utils.base_e2e_test import BaseE2ETest
11 |
12 |
13 | @pytest.mark.e2e
14 | class TestDiscoverModuleE2E(BaseE2ETest):
15 | """
16 | End-to-end test suite for the Falcon MCP Server Discover Module.
17 | """
18 |
19 | def test_search_applications_by_category(self):
20 | """Verify the agent can search for applications by name."""
21 |
22 | async def test_logic():
23 | fixtures = [
24 | {
25 | "operation": "combined_applications",
26 | "validator": lambda kwargs: "category:'Web Browsers'"
27 | in kwargs.get("parameters", {}).get("filter", ""),
28 | "response": {
29 | "status_code": 200,
30 | "body": {
31 | "resources": [
32 | {
33 | "id": "abc123_def456789abcdef123456789abcdef123456789abcdef123456789abcdef",
34 | "cid": "abc123",
35 | "name": "Chrome Browser",
36 | "vendor": "Google",
37 | "version": "120.0.6099.130",
38 | "software_type": "application",
39 | "name_vendor": "Chrome Browser-Google",
40 | "name_vendor_version": "Chrome Browser-Google-120.0.6099.130",
41 | "versioning_scheme": "semver",
42 | "groups": [
43 | "group1",
44 | "group2",
45 | "group3",
46 | ],
47 | "category": "Web Browsers",
48 | "architectures": [
49 | "x64",
50 | ],
51 | "first_seen_timestamp": "2025-02-15T10:30:00Z",
52 | "last_updated_timestamp": "2025-03-01T14:45:22Z",
53 | "is_suspicious": False,
54 | "is_normalized": True,
55 | "host": {
56 | "id": "abc123_xyz789",
57 | },
58 | },
59 | {
60 | "id": "def456_123456789abcdef123456789abcdef123456789abcdef123456789abcdef",
61 | "cid": "def456",
62 | "name": "Chrome Browser",
63 | "vendor": "Google",
64 | "version": "119.0.6045.199",
65 | "software_type": "application",
66 | "name_vendor": "Chrome Browser-Google",
67 | "name_vendor_version": "Chrome Browser-Google-119.0.6045.199",
68 | "versioning_scheme": "semver",
69 | "groups": [
70 | "group4",
71 | "group5",
72 | ],
73 | "category": "Web Browsers",
74 | "architectures": [
75 | "x64",
76 | ],
77 | "first_seen_timestamp": "2025-01-10T08:15:30Z",
78 | "last_updated_timestamp": "2025-02-20T11:22:45Z",
79 | "is_suspicious": False,
80 | "is_normalized": True,
81 | "host": {
82 | "id": "def456_abc123",
83 | },
84 | },
85 | ]
86 | },
87 | },
88 | }
89 | ]
90 |
91 | self._mock_api_instance.command.side_effect = self._create_mock_api_side_effect(
92 | fixtures
93 | )
94 |
95 | prompt = "Search for all applications categorized as Web Browsers in our environment and show me their details"
96 | return await self._run_agent_stream(prompt)
97 |
98 | def assertions(tools, result):
99 | tool_names_called = [tool["input"]["tool_name"] for tool in tools]
100 | self.assertIn("falcon_search_applications_fql_guide", tool_names_called)
101 | self.assertIn("falcon_search_applications", tool_names_called)
102 |
103 | used_tool = tools[len(tools) - 1]
104 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_applications")
105 |
106 | # Check for category filtering
107 | tool_input_str = json.dumps(used_tool["input"]["tool_input"]).lower()
108 | self.assertTrue(
109 | "web browsers" in tool_input_str,
110 | f"Expected web browsers category filtering in tool input: {tool_input_str}",
111 | )
112 |
113 | # Verify both applications are in the output
114 | self.assertIn("Chrome Browser", used_tool["output"])
115 | self.assertIn("Google", used_tool["output"])
116 | self.assertIn("120.0.6099.130", used_tool["output"])
117 | self.assertIn("119.0.6045.199", used_tool["output"])
118 |
119 | # Verify API call was made correctly
120 | self.assertGreaterEqual(
121 | self._mock_api_instance.command.call_count, 1, "Expected 1 API call"
122 | )
123 |
124 | # Check API call (combined_applications)
125 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
126 | "parameters", {}
127 | )
128 | filter_str = api_call_params.get("filter", "").lower()
129 | self.assertTrue(
130 | "category" in filter_str and "web browsers" in filter_str,
131 | f"Expected category:Web Browsers filtering in API call: {filter_str}",
132 | )
133 |
134 | # Verify result contains expected information
135 | self.assertIn("Chrome Browser", result)
136 | self.assertIn("Google", result)
137 | self.assertIn("120.0.6099.130", result)
138 | self.assertIn("119.0.6045.199", result)
139 | self.assertIn("Web Browsers", result)
140 |
141 | self.run_test_with_retries("test_search_applications_by_category", test_logic, assertions)
142 |
143 | def test_search_unmanaged_assets_by_platform(self):
144 | """Verify the agent can search for unmanaged assets by platform."""
145 |
146 | async def test_logic():
147 | fixtures = [
148 | {
149 | "operation": "combined_hosts",
150 | "validator": lambda kwargs: "entity_type:'unmanaged'"
151 | in kwargs.get("parameters", {}).get("filter", "")
152 | and (
153 | "platform_name:'Windows'" in kwargs.get("parameters", {}).get("filter", "")
154 | ),
155 | "response": {
156 | "status_code": 200,
157 | "body": {
158 | "resources": [
159 | {
160 | "id": "abc123def456789_1234567890abcdef1234567890abcdef1234567890abcdef",
161 | "cid": "abc123def456789",
162 | "entity_type": "unmanaged",
163 | "first_seen_timestamp": "2025-05-16T04:00:00Z",
164 | "last_seen_timestamp": "2025-08-12T23:00:00Z",
165 | "system_manufacturer": "VMware, Inc.",
166 | "hostname": "PC-FINANCE-W11",
167 | "local_ips_count": 1,
168 | "network_interfaces": [
169 | {
170 | "local_ip": "192.168.1.100",
171 | "mac_address": "AA-BB-CC-DD-EE-01",
172 | "network_prefix": "192.168",
173 | }
174 | ],
175 | "os_security": {},
176 | "current_local_ip": "192.168.1.100",
177 | "data_providers": ["Falcon passive discovery"],
178 | "data_providers_count": 1,
179 | "first_discoverer_aid": "abc123456789def0123456789abcdef01",
180 | "last_discoverer_aid": "abc123456789def0123456789abcdef01",
181 | "discoverer_count": 1,
182 | "discoverer_aids": ["abc123456789def0123456789abcdef01"],
183 | "discoverer_tags": [
184 | "FalconGroupingTags/Finance",
185 | "FalconGroupingTags/Workstation",
186 | "FalconGroupingTags/Windows11",
187 | ],
188 | "discoverer_platform_names": ["Windows"],
189 | "discoverer_product_type_descs": ["Workstation"],
190 | "discoverer_hostnames": ["WIN-MGMT-001"],
191 | "last_discoverer_hostname": "WIN-MGMT-001",
192 | "confidence": 75,
193 | "active_discovery": {},
194 | },
195 | {
196 | "id": "abc123def456789_fedcba0987654321fedcba0987654321fedcba0987654321",
197 | "cid": "abc123def456789",
198 | "entity_type": "unmanaged",
199 | "first_seen_timestamp": "2025-07-16T10:00:00Z",
200 | "last_seen_timestamp": "2025-08-12T23:00:00Z",
201 | "system_manufacturer": "Dell Inc.",
202 | "hostname": "SERVER-HR-002",
203 | "local_ips_count": 1,
204 | "network_interfaces": [
205 | {
206 | "local_ip": "192.168.2.50",
207 | "mac_address": "AA-BB-CC-DD-EE-02",
208 | "network_prefix": "192.168",
209 | }
210 | ],
211 | "os_security": {},
212 | "current_local_ip": "192.168.2.50",
213 | "data_providers": ["Falcon passive discovery"],
214 | "data_providers_count": 1,
215 | "first_discoverer_aid": "def456789abc012def456789abc012de",
216 | "last_discoverer_aid": "def456789abc012def456789abc012de",
217 | "discoverer_count": 1,
218 | "discoverer_aids": ["def456789abc012def456789abc012de"],
219 | "discoverer_tags": [
220 | "FalconGroupingTags/HR",
221 | "FalconGroupingTags/Server",
222 | "FalconGroupingTags/WindowsServer2019",
223 | ],
224 | "discoverer_platform_names": ["Windows"],
225 | "discoverer_product_type_descs": ["Server"],
226 | "discoverer_hostnames": ["WIN-DC-001"],
227 | "last_discoverer_hostname": "WIN-DC-001",
228 | "confidence": 85,
229 | "active_discovery": {},
230 | },
231 | ]
232 | },
233 | },
234 | }
235 | ]
236 |
237 | self._mock_api_instance.command.side_effect = self._create_mock_api_side_effect(
238 | fixtures
239 | )
240 |
241 | prompt = "Search for all unmanaged Windows assets in our environment and show me their details"
242 | return await self._run_agent_stream(prompt)
243 |
244 | def assertions(tools, result):
245 | tool_names_called = [tool["input"]["tool_name"] for tool in tools]
246 | # Agent must consult the FQL guide to learn proper platform filtering syntax
247 | self.assertIn("falcon_search_unmanaged_assets_fql_guide", tool_names_called)
248 | self.assertIn("falcon_search_unmanaged_assets", tool_names_called)
249 |
250 | used_tool = tools[len(tools) - 1]
251 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_unmanaged_assets")
252 |
253 | # Note: Agent may interpret platform filtering differently
254 | # The key behavior is that it successfully finds and returns unmanaged assets
255 |
256 | # Verify both unmanaged assets are in the output
257 | self.assertIn("PC-FINANCE-W11", used_tool["output"])
258 | self.assertIn("SERVER-HR-002", used_tool["output"])
259 | self.assertIn("VMware, Inc.", used_tool["output"])
260 | self.assertIn("Dell Inc.", used_tool["output"])
261 | self.assertIn("unmanaged", used_tool["output"])
262 |
263 | # Verify API call was made correctly
264 | self.assertGreaterEqual(
265 | self._mock_api_instance.command.call_count, 1, "Expected 1 API call"
266 | )
267 |
268 | # Check API call (combined_hosts)
269 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
270 | "parameters", {}
271 | )
272 | filter_str = api_call_params.get("filter", "").lower()
273 |
274 | # Verify entity_type:'unmanaged' is automatically added
275 | self.assertTrue(
276 | "entity_type:'unmanaged'" in filter_str,
277 | f"Expected entity_type:'unmanaged' in API call filter: {filter_str}",
278 | )
279 |
280 | # Note: Platform filtering may vary based on agent interpretation
281 | # The core requirement is that entity_type:'unmanaged' is enforced
282 |
283 | # Verify result contains expected information
284 | self.assertIn("PC-FINANCE-W11", result)
285 | self.assertIn("SERVER-HR-002", result)
286 | self.assertIn("Windows", result)
287 | self.assertIn("unmanaged", result)
288 | self.assertIn("Workstation", result)
289 | self.assertIn("Server", result)
290 |
291 | self.run_test_with_retries(
292 | "test_search_unmanaged_assets_by_platform", test_logic, assertions
293 | )
294 |
295 | def test_search_unmanaged_assets_by_confidence(self):
296 | """Verify the agent can search for unmanaged assets by confidence level."""
297 |
298 | async def test_logic():
299 | fixtures = [
300 | {
301 | "operation": "combined_hosts",
302 | "validator": lambda kwargs: "entity_type:'unmanaged'"
303 | in kwargs.get("parameters", {}).get("filter", "")
304 | and ("confidence:" in kwargs.get("parameters", {}).get("filter", "")),
305 | "response": {
306 | "status_code": 200,
307 | "body": {
308 | "resources": [
309 | {
310 | "id": "def789ghi012345_abcdef123456789abcdef123456789abcdef123456789abcdef",
311 | "cid": "def789ghi012345",
312 | "entity_type": "unmanaged",
313 | "first_seen_timestamp": "2025-07-17T08:00:00Z",
314 | "last_seen_timestamp": "2025-08-12T23:00:00Z",
315 | "system_manufacturer": "VMware, Inc.",
316 | "hostname": "PROD-DB-LINUX",
317 | "local_ips_count": 1,
318 | "network_interfaces": [
319 | {
320 | "local_ip": "10.0.1.200",
321 | "mac_address": "AA-BB-CC-DD-EE-03",
322 | "network_prefix": "10.0",
323 | }
324 | ],
325 | "os_security": {},
326 | "current_local_ip": "10.0.1.200",
327 | "data_providers": ["Falcon passive discovery"],
328 | "data_providers_count": 1,
329 | "first_discoverer_aid": "123456789def012345678901234567ab",
330 | "last_discoverer_aid": "123456789def012345678901234567ab",
331 | "discoverer_count": 1,
332 | "discoverer_aids": ["123456789def012345678901234567ab"],
333 | "discoverer_tags": [
334 | "FalconGroupingTags/Production",
335 | "FalconGroupingTags/Database",
336 | "FalconGroupingTags/Linux",
337 | "FalconGroupingTags/Critical-Infrastructure",
338 | ],
339 | "discoverer_platform_names": ["Linux"],
340 | "discoverer_product_type_descs": ["Server"],
341 | "discoverer_hostnames": ["LNX-MGMT-001"],
342 | "last_discoverer_hostname": "LNX-MGMT-001",
343 | "confidence": 95,
344 | "active_discovery": {},
345 | }
346 | ]
347 | },
348 | },
349 | }
350 | ]
351 |
352 | self._mock_api_instance.command.side_effect = self._create_mock_api_side_effect(
353 | fixtures
354 | )
355 |
356 | prompt = "Find all unmanaged assets with high confidence levels (above 80) that are likely real systems"
357 | return await self._run_agent_stream(prompt)
358 |
359 | def assertions(tools, result):
360 | tool_names_called = [tool["input"]["tool_name"] for tool in tools]
361 | self.assertIn("falcon_search_unmanaged_assets", tool_names_called)
362 |
363 | used_tool = tools[len(tools) - 1]
364 | self.assertEqual(used_tool["input"]["tool_name"], "falcon_search_unmanaged_assets")
365 |
366 | # Note: Agent may interpret confidence filtering differently
367 | # The key behavior is that it successfully finds and returns unmanaged assets
368 |
369 | # Verify high confidence asset is in the output
370 | self.assertIn("PROD-DB-LINUX", used_tool["output"])
371 | self.assertIn("95", used_tool["output"])
372 | self.assertIn("unmanaged", used_tool["output"])
373 |
374 | # Verify API call was made correctly
375 | self.assertGreaterEqual(
376 | self._mock_api_instance.command.call_count, 1, "Expected 1 API call"
377 | )
378 |
379 | # Check API call (combined_hosts)
380 | api_call_params = self._mock_api_instance.command.call_args_list[0][1].get(
381 | "parameters", {}
382 | )
383 | filter_str = api_call_params.get("filter", "").lower()
384 |
385 | # Verify entity_type:'unmanaged' is automatically added
386 | self.assertTrue(
387 | "entity_type:'unmanaged'" in filter_str,
388 | f"Expected entity_type:'unmanaged' in API call filter: {filter_str}",
389 | )
390 |
391 | # Note: Confidence filtering may vary based on agent interpretation
392 | # The core requirement is that entity_type:'unmanaged' is enforced
393 |
394 | # Verify result contains expected information
395 | self.assertIn("PROD-DB-LINUX", result)
396 | self.assertIn("95", result)
397 | self.assertIn("unmanaged", result)
398 | self.assertIn("Linux", result)
399 |
400 | self.run_test_with_retries(
401 | "test_search_unmanaged_assets_by_confidence", test_logic, assertions
402 | )
403 |
404 |
405 | if __name__ == "__main__":
406 | unittest.main()
407 |
```
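The Discover fixtures above use lambda validators to assert that the module's FQL filter contains `entity_type:'unmanaged'` plus whatever clause the agent added. When extending these tests, the same checks can be expressed as named helpers for readability; the functions below are illustrative only and do not exist in the repository.

```python
# Illustrative helpers (not in the repository) mirroring the lambda
# validators used in the Discover fixtures above.

def filter_of(kwargs: dict) -> str:
    """Return the FQL filter string passed to the mocked combined_hosts call."""
    return kwargs.get("parameters", {}).get("filter", "")


def expects_unmanaged_windows(kwargs: dict) -> bool:
    fql = filter_of(kwargs)
    return "entity_type:'unmanaged'" in fql and "platform_name:'Windows'" in fql


def expects_unmanaged_with_confidence(kwargs: dict) -> bool:
    fql = filter_of(kwargs)
    return "entity_type:'unmanaged'" in fql and "confidence:" in fql
```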