This is page 5 of 5. Use http://codebase.md/crowdstrike/falcon-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .env.dev.example
├── .env.example
├── .github
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug.yaml
│ │ ├── config.yml
│ │ ├── feature-request.yaml
│ │ └── question.yaml
│ └── workflows
│ ├── docker-build-push.yml
│ ├── docker-build-test.yml
│ ├── markdown-lint.yml
│ ├── python-lint.yml
│ ├── python-test-e2e.yml
│ ├── python-test.yml
│ └── release.yml
├── .gitignore
├── .markdownlint.json
├── CHANGELOG.md
├── Dockerfile
├── docs
│ ├── CODE_OF_CONDUCT.md
│ ├── CONTRIBUTING.md
│ ├── deployment
│ │ ├── amazon_bedrock_agentcore.md
│ │ └── google_cloud.md
│ ├── e2e_testing.md
│ ├── module_development.md
│ ├── resource_development.md
│ └── SECURITY.md
├── examples
│ ├── adk
│ │ ├── adk_agent_operations.sh
│ │ ├── falcon_agent
│ │ │ ├── __init__.py
│ │ │ ├── agent.py
│ │ │ ├── env.properties
│ │ │ └── requirements.txt
│ │ └── README.md
│ ├── basic_usage.py
│ ├── mcp_config.json
│ ├── sse_usage.py
│ └── streamable_http_usage.py
├── falcon_mcp
│ ├── __init__.py
│ ├── client.py
│ ├── common
│ │ ├── __init__.py
│ │ ├── api_scopes.py
│ │ ├── errors.py
│ │ ├── logging.py
│ │ └── utils.py
│ ├── modules
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloud.py
│ │ ├── detections.py
│ │ ├── discover.py
│ │ ├── hosts.py
│ │ ├── idp.py
│ │ ├── incidents.py
│ │ ├── intel.py
│ │ ├── sensor_usage.py
│ │ ├── serverless.py
│ │ └── spotlight.py
│ ├── registry.py
│ ├── resources
│ │ ├── __init__.py
│ │ ├── cloud.py
│ │ ├── detections.py
│ │ ├── discover.py
│ │ ├── hosts.py
│ │ ├── incidents.py
│ │ ├── intel.py
│ │ ├── sensor_usage.py
│ │ ├── serverless.py
│ │ └── spotlight.py
│ └── server.py
├── LICENSE
├── pyproject.toml
├── README.md
├── scripts
│ ├── generate_e2e_report.py
│ └── test_results_viewer.html
├── SUPPORT.md
├── tests
│ ├── __init__.py
│ ├── common
│ │ ├── __init__.py
│ │ ├── test_api_scopes.py
│ │ ├── test_errors.py
│ │ ├── test_logging.py
│ │ └── test_utils.py
│ ├── conftest.py
│ ├── e2e
│ │ ├── __init__.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── test_cloud.py
│ │ │ ├── test_detections.py
│ │ │ ├── test_discover.py
│ │ │ ├── test_hosts.py
│ │ │ ├── test_idp.py
│ │ │ ├── test_incidents.py
│ │ │ ├── test_intel.py
│ │ │ ├── test_sensor_usage.py
│ │ │ ├── test_serverless.py
│ │ │ └── test_spotlight.py
│ │ └── utils
│ │ ├── __init__.py
│ │ └── base_e2e_test.py
│ ├── modules
│ │ ├── __init__.py
│ │ ├── test_base.py
│ │ ├── test_cloud.py
│ │ ├── test_detections.py
│ │ ├── test_discover.py
│ │ ├── test_hosts.py
│ │ ├── test_idp.py
│ │ ├── test_incidents.py
│ │ ├── test_intel.py
│ │ ├── test_sensor_usage.py
│ │ ├── test_serverless.py
│ │ ├── test_spotlight.py
│ │ └── utils
│ │ └── test_modules.py
│ ├── test_client.py
│ ├── test_registry.py
│ ├── test_server.py
│ └── test_streamable_http_transport.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/falcon_mcp/resources/detections.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Contains Detections resources.
3 | """
4 |
5 | from falcon_mcp.common.utils import generate_md_table
6 |
7 | # List of tuples containing filter options data: (name, type, description)
8 | SEARCH_DETECTIONS_FQL_FILTERS = [
9 | (
10 | "Name",
11 | "Type",
12 | "Description"
13 | ),
14 | (
15 | "agent_id",
16 | "String",
17 | """
18 | Agent ID associated with the alert.
19 | Ex: 77d11725xxxxxxxxxxxxxxxxxxxxc48ca19
20 | """
21 | ),
22 | (
23 | "aggregate_id",
24 | "String",
25 | """
26 | Unique identifier linking multiple related alerts
27 | that represent a logical grouping (like legacy
28 | detection_id). Use this to correlate related alerts.
29 | Ex: aggind:77d1172532c8xxxxxxxxxxxxxxxxxxxx49030016385
30 | """
31 | ),
32 | (
33 | "composite_id",
34 | "String",
35 | """
36 | Global unique identifier for the individual alert.
37 | This replaces the legacy detection_id for individual
38 | alerts in the new Alerts API.
39 | Ex: d615:ind:77d1172xxxxxxxxxxxxxxxxx6c48ca19
40 | """
41 | ),
42 | (
43 | "cid",
44 | "String",
45 | """
46 | Customer ID.
47 | Ex: d61501xxxxxxxxxxxxxxxxxxxxa2da2158
48 | """
49 | ),
50 | (
51 | "pattern_id",
52 | "Number",
53 | """
54 | Detection pattern identifier.
55 | Ex: 67
56 | """
57 | ),
58 | (
59 | "assigned_to_name",
60 | "String",
61 | """
62 | Name of assigned Falcon user.
63 | Ex: Alice Anderson
64 | """
65 | ),
66 | (
67 | "assigned_to_uid",
68 | "String",
69 | """
70 | User ID of assigned Falcon user.
71 | Ex: [email protected]
72 | """
73 | ),
74 | (
75 | "assigned_to_uuid",
76 | "String",
77 | """
78 | UUID of assigned Falcon user.
79 | Ex: dc54xxxxxxxxxxxxxxxx1658
80 | """
81 | ),
82 | (
83 | "status",
84 | "String",
85 | """
86 | Alert status. Possible values:
87 | - new: Newly detected, needs triage
88 | - in_progress: Being investigated
89 | - closed: Investigation completed
90 | - reopened: Previously closed, now active again
91 | Ex: new
92 | """
93 | ),
94 | (
95 | "created_timestamp",
96 | "Timestamp",
97 | """
98 | When alert was created in UTC format.
99 | Ex: 2024-02-22T14:16:04.973070837Z
100 | """
101 | ),
102 | (
103 | "updated_timestamp",
104 | "Timestamp",
105 | """
106 | Last modification time in UTC format.
107 | Ex: 2024-02-22T15:15:05.637481021Z
108 | """
109 | ),
110 | (
111 | "timestamp",
112 | "Timestamp",
113 | """
114 | Alert occurrence timestamp in UTC format.
115 | Ex: 2024-02-22T14:15:03.112Z
116 | """
117 | ),
118 | (
119 | "crawled_timestamp",
120 | "Timestamp",
121 | """
122 | Internal timestamp for processing in UTC format.
123 | Ex: 2024-02-22T15:15:05.637684718Z
124 | """
125 | ),
126 | (
127 | "confidence",
128 | "Number",
129 | """
130 | Confidence level (1-100). Higher values indicate
131 | greater confidence in the detection.
132 | Ex: 80
133 | """
134 | ),
135 | (
136 | "severity",
137 | "Number",
138 | """
139 | Security risk level (1-100). Use numeric values:
140 | Ex: 90
141 | """
142 | ),
143 | (
144 | "severity_name",
145 | "String",
146 | """
147 | Human-readable severity level name. Easier to use
148 | than numeric ranges. Possible values:
149 | - Informational: Low-priority alerts
150 | - Low: Minor security concerns
151 | - Medium: Moderate security risks
152 | - High: Significant security threats
153 | - Critical: Severe security incidents
154 | Ex: High
155 | """
156 | ),
157 | (
158 | "tactic",
159 | "String",
160 | """
161 | MITRE ATT&CK tactic name.
162 | Ex: Credential Access
163 | """
164 | ),
165 | (
166 | "tactic_id",
167 | "String",
168 | """
169 | MITRE ATT&CK tactic identifier.
170 | Ex: TA0006
171 | """
172 | ),
173 | (
174 | "technique",
175 | "String",
176 | """
177 | MITRE ATT&CK technique name.
178 | Ex: OS Credential Dumping
179 | """
180 | ),
181 | (
182 | "technique_id",
183 | "String",
184 | """
185 | MITRE ATT&CK technique identifier.
186 | Ex: T1003
187 | """
188 | ),
189 | (
190 | "objective",
191 | "String",
192 | """
193 | Attack objective description.
194 | Ex: Gain Access
195 | """
196 | ),
197 | (
198 | "scenario",
199 | "String",
200 | """
201 | Detection scenario classification.
202 | Ex: credential_theft
203 | """
204 | ),
205 | (
206 | "product",
207 | "String",
208 | """
209 | Source Falcon product. Possible values:
210 | - epp: Endpoint Protection Platform
211 | - idp: Identity Protection
212 | - mobile: Mobile Device Protection
213 | - xdr: Extended Detection and Response
214 | - overwatch: Managed Threat Hunting
215 | - cwpp: Cloud Workload Protection
216 | - ngsiem: Next-Gen SIEM
217 | - thirdparty: Third-party integrations
218 | - data-protection: Data Loss Prevention
219 | Ex: epp
220 | """
221 | ),
222 | (
223 | "platform",
224 | "String",
225 | """
226 | Operating system platform.
227 | Ex: Windows, Linux, Mac
228 | """
229 | ),
230 | (
231 | "data_domains",
232 | "Array",
233 | """
234 | Domain to which this alert belongs to. Possible
235 | values: Endpoint, Identity, Cloud, Email, Web,
236 | Network (array field).
237 | Ex: ["Endpoint"]
238 | """
239 | ),
240 | (
241 | "source_products",
242 | "Array",
243 | """
244 | Products associated with the source of this alert
245 | (array field).
246 | Ex: ["Falcon Insight"]
247 | """
248 | ),
249 | (
250 | "source_vendors",
251 | "Array",
252 | """
253 | Vendors associated with the source of this alert
254 | (array field).
255 | Ex: ["CrowdStrike"]
256 | """
257 | ),
258 | (
259 | "name",
260 | "String",
261 | """
262 | Detection pattern name.
263 | Ex: NtdsFileAccessedViaVss
264 | """
265 | ),
266 | (
267 | "display_name",
268 | "String",
269 | """
270 | Human-readable detection name.
271 | Ex: NtdsFileAccessedViaVss
272 | """
273 | ),
274 | (
275 | "description",
276 | "String",
277 | """
278 | Detection description.
279 | Ex: Process accessed credential-containing NTDS.dit
280 | in a Volume Shadow Snapshot
281 | """
282 | ),
283 | (
284 | "type",
285 | "String",
286 | """
287 | Detection type classification. Possible values:
288 | - ldt: Legacy Detection Technology
289 | - ods: On-sensor Detection System
290 | - xdr: Extended Detection and Response
291 | - ofp: Offline Protection
292 | - ssd: Suspicious Script Detection
293 | - windows_legacy: Windows Legacy Detection
294 | Ex: ldt
295 | """
296 | ),
297 | (
298 | "show_in_ui",
299 | "Boolean",
300 | """
301 | Whether detection appears in UI.
302 | Ex: true
303 | """
304 | ),
305 | (
306 | "email_sent",
307 | "Boolean",
308 | """
309 | Whether email was sent for this detection.
310 | Ex: true
311 | """
312 | ),
313 | (
314 | "seconds_to_resolved",
315 | "Number",
316 | """
317 | Time in seconds to move from new to closed status.
318 | Ex: 3600
319 | """
320 | ),
321 | (
322 | "seconds_to_triaged",
323 | "Number",
324 | """
325 | Time in seconds to move from new to in_progress.
326 | Ex: 1800
327 | """
328 | ),
329 | (
330 | "comments.value",
331 | "String",
332 | """
333 | A single term in an alert comment. Matching is
334 | case sensitive. Partial match and wildcard search
335 | are not supported.
336 | Ex: suspicious
337 | """
338 | ),
339 | (
340 | "tags",
341 | "Array",
342 | """
343 | Contains a separated list of FalconGroupingTags
344 | and SensorGroupingTags (array field).
345 | Ex: ["fc/offering/falcon_complete",
346 | "fc/exclusion/pre-epp-migration", "fc/exclusion/nonlive"]
347 | """
348 | ),
349 | (
350 | "alleged_filetype",
351 | "String",
352 | """
353 | The alleged file type of the executable.
354 | Ex: exe
355 | """
356 | ),
357 | (
358 | "cmdline",
359 | "String",
360 | """
361 | Command line arguments used to start the process.
362 | Ex: powershell.exe -ExecutionPolicy Bypass
363 | """
364 | ),
365 | (
366 | "filename",
367 | "String",
368 | """
369 | Process filename without path.
370 | Ex: powershell.exe
371 | """
372 | ),
373 | (
374 | "filepath",
375 | "String",
376 | """
377 | Full file path of the executable.
378 | Ex: C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe
379 | """
380 | ),
381 | (
382 | "process_id",
383 | "String",
384 | """
385 | Process identifier.
386 | Ex: pid:12345:abcdef
387 | """
388 | ),
389 | (
390 | "parent_process_id",
391 | "String",
392 | """
393 | Parent process identifier.
394 | Ex: pid:12344:ghijkl
395 | """
396 | ),
397 | (
398 | "local_process_id",
399 | "Number",
400 | """
401 | Local process ID number.
402 | Ex: 12345
403 | """
404 | ),
405 | (
406 | "process_start_time",
407 | "Number",
408 | """
409 | Process start timestamp (epoch).
410 | Ex: 1724347200
411 | """
412 | ),
413 | (
414 | "process_end_time",
415 | "Number",
416 | """
417 | Process end timestamp (epoch).
418 | Ex: 1724347800
419 | """
420 | ),
421 | (
422 | "tree_id",
423 | "String",
424 | """
425 | Process tree identifier.
426 | Ex: tree:77d11725:abcd1234
427 | """
428 | ),
429 | (
430 | "tree_root",
431 | "String",
432 | """
433 | Process tree root identifier.
434 | Ex: root:77d11725:efgh5678
435 | """
436 | ),
437 | (
438 | "device.agent_load_flags",
439 | "String",
440 | """
441 | Agent load flags configuration.
442 | Ex: 0x00000001
443 | """
444 | ),
445 | (
446 | "device.agent_local_time",
447 | "Timestamp",
448 | """
449 | Agent local timestamp in UTC format.
450 | Ex: 2024-02-22T14:15:03.112Z
451 | """
452 | ),
453 | (
454 | "device.agent_version",
455 | "String",
456 | """
457 | CrowdStrike Falcon agent version.
458 | Ex: 7.10.19103.0
459 | """
460 | ),
461 | (
462 | "device.bios_manufacturer",
463 | "String",
464 | """
465 | System BIOS manufacturer name.
466 | Ex: Dell Inc.
467 | """
468 | ),
469 | (
470 | "device.bios_version",
471 | "String",
472 | """
473 | System BIOS version information.
474 | Ex: 2.18.0
475 | """
476 | ),
477 | (
478 | "device.config_id_base",
479 | "String",
480 | """
481 | Base configuration identifier.
482 | Ex: 65994753
483 | """
484 | ),
485 | (
486 | "device.config_id_build",
487 | "String",
488 | """
489 | Build configuration identifier.
490 | Ex: 19103
491 | """
492 | ),
493 | (
494 | "device.config_id_platform",
495 | "String",
496 | """
497 | Platform configuration identifier.
498 | Ex: 3
499 | """
500 | ),
501 | (
502 | "device.device_id",
503 | "String",
504 | """
505 | Unique device identifier.
506 | Ex: 77d11725xxxxxxxxxxxxxxxxxxxxc48ca19
507 | """
508 | ),
509 | (
510 | "device.external_ip",
511 | "String",
512 | """
513 | Device external/public IP address.
514 | Ex: 203.0.113.5
515 | """
516 | ),
517 | (
518 | "device.first_seen",
519 | "Timestamp",
520 | """
521 | First time device was seen in UTC format.
522 | Ex: 2024-01-15T10:30:00.000Z
523 | """
524 | ),
525 | (
526 | "device.hostname",
527 | "String",
528 | """
529 | Device hostname or computer name.
530 | Ex: DESKTOP-ABC123
531 | """
532 | ),
533 | (
534 | "device.last_seen",
535 | "Timestamp",
536 | """
537 | Last time device was seen in UTC format.
538 | Ex: 2024-02-22T14:15:03.112Z
539 | """
540 | ),
541 | (
542 | "device.local_ip",
543 | "String",
544 | """
545 | Device local/private IP address.
546 | Ex: 192.168.1.100
547 | """
548 | ),
549 | (
550 | "device.major_version",
551 | "String",
552 | """
553 | Operating system major version.
554 | Ex: 10
555 | """
556 | ),
557 | (
558 | "device.minor_version",
559 | "String",
560 | """
561 | Operating system minor version.
562 | Ex: 0
563 | """
564 | ),
565 | (
566 | "device.modified_timestamp",
567 | "Timestamp",
568 | """
569 | Device record last modified timestamp in UTC format.
570 | Ex: 2024-02-22T15:15:05.637Z
571 | """
572 | ),
573 | (
574 | "device.os_version",
575 | "String",
576 | """
577 | Complete operating system version string.
578 | Ex: Windows 10
579 | """
580 | ),
581 | (
582 | "device.ou",
583 | "String",
584 | """
585 | Organizational unit or domain path.
586 | Ex: OU=Computers,DC=example,DC=com
587 | """
588 | ),
589 | (
590 | "device.platform_id",
591 | "String",
592 | """
593 | Platform identifier code.
594 | Ex: 0
595 | """
596 | ),
597 | (
598 | "device.platform_name",
599 | "String",
600 | """
601 | Operating system platform name.
602 | Ex: Windows
603 | """
604 | ),
605 | (
606 | "device.product_type",
607 | "String",
608 | """
609 | Product type identifier.
610 | Ex: 1
611 | """
612 | ),
613 | (
614 | "device.product_type_desc",
615 | "String",
616 | """
617 | Product type description.
618 | Ex: Workstation
619 | """
620 | ),
621 | (
622 | "device.status",
623 | "String",
624 | """
625 | Device connection status.
626 | Ex: normal
627 | """
628 | ),
629 | (
630 | "device.system_manufacturer",
631 | "String",
632 | """
633 | System hardware manufacturer.
634 | Ex: Dell Inc.
635 | """
636 | ),
637 | (
638 | "device.system_product_name",
639 | "String",
640 | """
641 | System product model name.
642 | Ex: OptiPlex 7090
643 | """
644 | ),
645 | (
646 | "md5",
647 | "String",
648 | """
649 | MD5 hash of the file.
650 | Ex: 5d41402abc4b2a76b9719d911017c592
651 | """
652 | ),
653 | (
654 | "sha1",
655 | "String",
656 | """
657 | SHA1 hash of the file.
658 | Ex: aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
659 | """
660 | ),
661 | (
662 | "sha256",
663 | "String",
664 | """
665 | SHA256 hash of the file.
666 | Ex: 13550350a8681c84c861aac2e5b440161c2b33a3e4f302ac680ca5b686de48de
667 | """
668 | ),
669 | (
670 | "global_prevalence",
671 | "String",
672 | """
673 | Global prevalence rating of the file.
674 | Ex: rare
675 | """
676 | ),
677 | (
678 | "local_prevalence",
679 | "String",
680 | """
681 | Local prevalence rating within the organization.
682 | Ex: common
683 | """
684 | ),
685 | (
686 | "charlotte.can_triage",
687 | "Boolean",
688 | """
689 | Whether alert can be triaged automatically.
690 | Ex: true
691 | """
692 | ),
693 | (
694 | "charlotte.triage_status",
695 | "String",
696 | """
697 | Automated triage status.
698 | Ex: triaged
699 | """
700 | ),
701 | (
702 | "incident.created",
703 | "Timestamp",
704 | """
705 | Incident creation timestamp in UTC format.
706 | Ex: 2024-02-22T14:15:03.112Z
707 | """
708 | ),
709 | (
710 | "incident.end",
711 | "Timestamp",
712 | """
713 | Incident end timestamp in UTC format.
714 | Ex: 2024-02-22T14:45:03.112Z
715 | """
716 | ),
717 | (
718 | "incident.id",
719 | "String",
720 | """
721 | Unique incident identifier.
722 | Ex: inc_12345abcdef
723 | """
724 | ),
725 | (
726 | "incident.score",
727 | "Number",
728 | """
729 | Incident severity score (1-100).
730 | Ex: 85
731 | """
732 | ),
733 | (
734 | "incident.start",
735 | "Timestamp",
736 | """
737 | Incident start timestamp in UTC format.
738 | Ex: 2024-02-22T14:15:03.112Z
739 | """
740 | ),
741 | (
742 | "indicator_id",
743 | "String",
744 | """
745 | Threat indicator identifier.
746 | Ex: ind_67890wxyz
747 | """
748 | ),
749 | (
750 | "parent_details.*",
751 | "Object",
752 | """
753 | Parent process information object. Use dot notation
754 | for specific fields like parent_details.cmdline,
755 | parent_details.filename, parent_details.filepath,
756 | parent_details.process_id, etc.
757 | Ex: parent_details.filename:'explorer.exe'
758 | """
759 | ),
760 | (
761 | "grandparent_details.*",
762 | "Object",
763 | """
764 | Grandparent process information object. Use dot
765 | notation for specific fields like
766 | grandparent_details.cmdline,
767 | grandparent_details.filename, etc.
768 | Ex: grandparent_details.filepath:'*winlogon*'
769 | """
770 | ),
771 | (
772 | "child_process_ids",
773 | "Array",
774 | """
775 | List of child process identifiers spawned by this
776 | process (array field).
777 | Ex: ["pid:12346:abcdef", "pid:12347:ghijkl"]
778 | """
779 | ),
780 | (
781 | "triggering_process_graph_id",
782 | "String",
783 | """
784 | Process graph identifier for the triggering process
785 | in the attack chain.
786 | Ex: graph:77d11725:trigger123
787 | """
788 | ),
789 | (
790 | "ioc_context",
791 | "Array",
792 | """
793 | IOC context information and metadata (array field).
794 | Ex: ["malware_family", "apt_group"]
795 | """
796 | ),
797 | (
798 | "ioc_values",
799 | "Array",
800 | """
801 | IOC values associated with the alert (array field).
802 | Ex: ["192.168.1.100", "malicious.exe"]
803 | """
804 | ),
805 | (
806 | "falcon_host_link",
807 | "String",
808 | """
809 | Direct link to Falcon console for this host.
810 | Ex: https://falcon.crowdstrike.com/hosts/detail/77d11725xxxxxxxxxxxxxxxxxxxxc48ca19
811 | """
812 | ),
813 | (
814 | "user_id",
815 | "String",
816 | """
817 | User identifier associated with the process.
818 | Ex: S-1-5-21-1234567890-987654321-1122334455-1001
819 | """
820 | ),
821 | (
822 | "user_name",
823 | "String",
824 | """
825 | Username associated with the process.
826 | Ex: administrator
827 | """
828 | ),
829 | (
830 | "logon_domain",
831 | "String",
832 | """
833 | Logon domain name for the user.
834 | Ex: CORP
835 | """
836 | ),
837 | ]
838 |
839 | SEARCH_DETECTIONS_FQL_DOCUMENTATION = r"""Falcon Query Language (FQL) - Search Detections/Alerts Guide
840 |
841 | === BASIC SYNTAX ===
842 | field_name:[operator]'value'
843 |
844 | === OPERATORS ===
845 | • = (default): field_name:'value'
846 | • !: field_name:!'value' (not equal)
847 | • >, >=, <, <=: field_name:>50 (comparison)
848 | • ~: field_name:~'partial' (text match, case insensitive)
849 | • !~: field_name:!~'exclude' (not text match)
850 | • *: field_name:'prefix*' or field_name:'*suffix*' (wildcards)
851 |
852 | === DATA TYPES ===
853 | • String: 'value'
854 | • Number: 123 (no quotes)
855 | • Boolean: true/false (no quotes)
856 | • Timestamp: 'YYYY-MM-DDTHH:MM:SSZ'
857 | • Array: ['value1', 'value2']
858 |
859 | === WILDCARDS ===
860 | ✅ **String & Number fields**: field_name:'pattern*' (prefix), field_name:'*pattern' (suffix), field_name:'*pattern*' (contains)
861 | ❌ **Timestamp fields**: Not supported (causes errors)
862 | ⚠️ **Number wildcards**: Require quotes: pattern_id:'123*'
863 |
864 | === COMBINING ===
865 | • + = AND: status:'new'+severity:>=70
866 | • , = OR: product:'epp',product:'xdr'
867 | • () = GROUPING: status:'new'+(severity:>=60+severity:<80)+product:'epp'
868 |
869 | === COMMON PATTERNS ===
870 | 🔍 SORT OPTIONS:
871 | • timestamp: Timestamp when the detection occurred
872 | • created_timestamp: When the detection was created
873 | • updated_timestamp: When the detection was last modified
874 | • severity: Severity level of the detection (recommended when sorting by severity)
875 | • confidence: Confidence level of the detection
876 | • agent_id: Agent ID associated with the detection
877 |
878 | Sort either asc (ascending) or desc (descending).
879 | Both formats are supported: 'severity.desc' or 'severity|desc'
880 |
881 | When searching for high severity detections, use 'severity.desc' to get the highest severity detections first.
882 | For chronological ordering, use 'timestamp.desc' for most recent detections first.
883 |
884 | Examples: 'severity.desc', 'timestamp.desc'
885 |
886 | 🔍 SEVERITY RANGES:
887 |
888 | **Numeric Ranges (for precise filtering):**
889 | • Informational: severity:<20
890 | • Low: severity:>=20+severity:<40
891 | • Medium: severity:>=40+severity:<60
892 | • High: severity:>=60+severity:<80
893 | • Critical: severity:>=80
894 |
895 | **Name-based (easier to use):**
896 | • severity_name:'Informational' (severity 1-19)
897 | • severity_name:'Low' (severity 20-39)
898 | • severity_name:'Medium' (severity 40-59)
899 | • severity_name:'High' (severity 60-79)
900 | • severity_name:'Critical' (severity 80-100)
901 |
902 | **Range Examples:**
903 | • Medium severity and above: severity:>=40 OR severity_name:'Medium',severity_name:'High',severity_name:'Critical'
904 | • High severity and above: severity:>=60 OR severity_name:'High',severity_name:'Critical'
905 | • Critical alerts only: severity:>=80 OR severity_name:'Critical'
906 |
907 | 🔍 ESSENTIAL FILTERS:
908 | • Status: status:'new' | status:'in_progress' | status:'closed' | status:'reopened'
909 | • Severity (by name): severity_name:'High' | severity_name:'Critical' | severity_name:'Medium' | severity_name:'Low' | severity_name:'Informational'
910 | • Severity (by range): severity:>=80 (Critical+) | severity:>=60 (High+) | severity:>=40 (Medium+) | severity:>=20 (Low+)
911 | • Product: product:'epp' | product:'idp' | product:'xdr' | product:'overwatch' (see field table for all)
912 | • Assignment: assigned_to_name:!'*' (unassigned) | assigned_to_name:'user.name'
913 | • Timestamps: created_timestamp:>'2025-01-01T00:00:00Z' | created_timestamp:>='date1'+created_timestamp:<='date2'
914 | • Wildcards: name:'EICAR*' | description:'*credential*' | agent_id:'77d11725*' | pattern_id:'301*'
915 | • Combinations: status:'new'+severity_name:'High'+product:'epp' | status:'new'+severity:>=70+product:'epp' | product:'epp',product:'xdr'
916 |
917 | 🔍 EPP-SPECIFIC PATTERNS:
918 | • Device targeting: product:'epp'+device.hostname:'DC*' | product:'epp'+device.external_ip:'192.168.*'
919 | • Process analysis: product:'epp'+filename:'*cmd*'+cmdline:'*password*' | product:'epp'+filepath:'*system32*'
920 | • Hash investigation: product:'epp'+sha256:'abc123...' | product:'epp'+md5:'def456...'
921 | • Incident correlation: product:'epp'+incident.id:'inc_12345' | product:'epp'+incident.score:>=80
922 | • User activity: product:'epp'+user_name:'admin*' | product:'epp'+logon_domain:'CORP'
923 | • Nested queries: product:'epp'+device.agent_version:'7.*' | product:'epp'+parent_details.filename:'*explorer*'
924 |
925 | === falcon_search_detections FQL filter available fields ===
926 |
927 | """ + generate_md_table(SEARCH_DETECTIONS_FQL_FILTERS) + """
928 |
929 | === COMPLEX FILTER EXAMPLES ===
930 |
931 | # New high-severity endpoint alerts (numeric approach)
932 | status:'new'+(severity:>=60+severity:<80)+product:'epp'
933 |
934 | # New high-severity endpoint alerts (name-based approach)
935 | status:'new'+severity_name:'High',severity_name:'Critical'+product:'epp'
936 |
937 | # Unassigned critical alerts from last 24 hours (numeric)
938 | assigned_to_name:!'*'+severity:>=90+created_timestamp:>'2025-01-19T00:00:00Z'
939 |
940 | # Unassigned critical alerts from last 24 hours (name-based)
941 | assigned_to_name:!'*'+severity_name:'Critical'+created_timestamp:>'2025-01-19T00:00:00Z'
942 |
943 | # Medium severity and above endpoint alerts (numeric - easier for ranges)
944 | severity:>=40+product:'epp'+status:'new'
945 |
946 | # Medium severity and above endpoint alerts (name-based - more explicit)
947 | severity_name:'Medium',severity_name:'High',severity_name:'Critical'+product:'epp'+status:'new'
948 |
949 | # OverWatch alerts with credential access tactics
950 | product:'overwatch'+tactic:'Credential Access'
951 |
952 | # XDR high severity alerts with specific technique (name-based)
953 | product:'xdr'+severity_name:'High'+technique_id:'T1003'
954 |
955 | # XDR high severity alerts with specific technique (numeric)
956 | product:'xdr'+severity:>=60+technique_id:'T1003'
957 |
958 | # Find alerts by aggregate_id (related alerts)
959 | aggregate_id:'aggind:77d1172532c8xxxxxxxxxxxxxxxxxxxx49030016385'
960 |
961 | # Find alerts from multiple products
962 | product:['epp', 'xdr', 'overwatch']
963 |
964 | # Recently updated critical alerts assigned to specific analyst
965 | assigned_to_name:'alice.anderson'+updated_timestamp:>'2025-01-18T12:00:00Z'+severity_name:'Critical'
966 |
967 | # Find low-priority informational alerts for cleanup
968 | severity_name:'Informational'+status:'closed'+assigned_to_name:!'*'
969 |
970 | # Find alerts with specific MITRE ATT&CK tactics and medium+ severity
971 | tactic:['Credential Access', 'Persistence', 'Privilege Escalation']+severity:>=40
972 |
973 | # Closed alerts resolved quickly (under 1 hour) - high severity only
974 | status:'closed'+seconds_to_resolved:<3600+severity_name:'High',severity_name:'Critical'
975 |
976 | # Date range with multiple products and high+ severity (name-based)
977 | created_timestamp:>='2025-01-15T00:00:00Z'+created_timestamp:<='2025-01-20T00:00:00Z'+product:'epp',product:'xdr'+severity_name:'High',severity_name:'Critical'
978 |
979 | # Date range with multiple products and high+ severity (numeric)
980 | created_timestamp:>='2025-01-15T00:00:00Z'+created_timestamp:<='2025-01-20T00:00:00Z'+product:'epp',product:'xdr'+severity:>=60
981 |
982 | # All unassigned alerts except informational (name-based exclusion)
983 | assigned_to_name:!'*'+severity_name:!'Informational'
984 |
985 | # All unassigned alerts except informational (numeric approach)
986 | assigned_to_name:!'*'+severity:>=20
987 | """
988 |
```
--------------------------------------------------------------------------------
/tests/modules/test_idp.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Tests for the IDP (Identity Protection) module.
3 | """
4 |
5 | import unittest
6 |
7 | from falcon_mcp.modules.idp import IdpModule
8 | from tests.modules.utils.test_modules import TestModules
9 |
10 |
11 | class TestIdpModule(TestModules):
12 | """Test cases for the IDP module."""
13 |
    def setUp(self):
        """Set up test fixtures."""
        # TestModules.setup_module wires up the module under test with a
        # mocked Falcon client (exposed to tests as self.module and
        # self.mock_client — see the other test methods below).
        self.setup_module(IdpModule)
17 |
18 | def test_register_tools(self):
19 | """Test registering tools with the server."""
20 | expected_tools = [
21 | "falcon_idp_investigate_entity",
22 | ]
23 | self.assert_tools_registered(expected_tools)
24 |
25 | def test_investigate_entity_basic_functionality(self):
26 | """Test basic entity investigation functionality."""
27 | # Setup mock GraphQL response for entity resolution
28 | mock_response = {
29 | "status_code": 200,
30 | "body": {
31 | "data": {
32 | "entities": {
33 | "nodes": [
34 | {
35 | "entityId": "test-entity-123",
36 | "primaryDisplayName": "Test User",
37 | "secondaryDisplayName": "[email protected]",
38 | "type": "USER",
39 | "riskScore": 75,
40 | "riskScoreSeverity": "MEDIUM",
41 | }
42 | ]
43 | }
44 | }
45 | },
46 | }
47 | self.mock_client.command.return_value = mock_response
48 |
49 | # Call investigate_entity with basic parameters
50 | result = self.module.investigate_entity(
51 | entity_names=["Test User"],
52 | investigation_types=["entity_details"],
53 | limit=10,
54 | )
55 |
56 | # Verify client command was called (at least for entity resolution)
57 | self.assertTrue(self.mock_client.command.called)
58 |
59 | # Verify result structure
60 | self.assertIn("investigation_summary", result)
61 | self.assertIn("entity_details", result)
62 | self.assertEqual(result["investigation_summary"]["status"], "completed")
63 | self.assertGreater(result["investigation_summary"]["entity_count"], 0)
64 |
65 | def test_investigate_entity_with_multiple_investigation_types(self):
66 | """Test entity investigation with multiple investigation types."""
67 | # Setup mock GraphQL responses for different investigation types
68 | mock_responses = [
69 | # Entity resolution response
70 | {
71 | "status_code": 200,
72 | "body": {
73 | "data": {
74 | "entities": {
75 | "nodes": [
76 | {
77 | "entityId": "test-entity-456",
78 | "primaryDisplayName": "Admin User",
79 | "secondaryDisplayName": "[email protected]",
80 | }
81 | ]
82 | }
83 | }
84 | },
85 | },
86 | # Entity details response
87 | {
88 | "status_code": 200,
89 | "body": {
90 | "data": {
91 | "entities": {
92 | "nodes": [
93 | {
94 | "entityId": "test-entity-456",
95 | "primaryDisplayName": "Admin User",
96 | "secondaryDisplayName": "[email protected]",
97 | "type": "USER",
98 | "riskScore": 85,
99 | "riskScoreSeverity": "HIGH",
100 | "riskFactors": [
101 | {
102 | "type": "PRIVILEGED_ACCESS",
103 | "severity": "HIGH",
104 | }
105 | ],
106 | }
107 | ]
108 | }
109 | }
110 | },
111 | },
112 | # Timeline response
113 | {
114 | "status_code": 200,
115 | "body": {
116 | "data": {
117 | "timeline": {
118 | "nodes": [
119 | {
120 | "eventId": "event-123",
121 | "eventType": "AUTHENTICATION",
122 | "timestamp": "2024-01-01T12:00:00Z",
123 | }
124 | ],
125 | "pageInfo": {"hasNextPage": False},
126 | }
127 | }
128 | },
129 | },
130 | # Relationship analysis response
131 | {
132 | "status_code": 200,
133 | "body": {
134 | "data": {
135 | "entities": {
136 | "nodes": [
137 | {
138 | "entityId": "test-entity-456",
139 | "primaryDisplayName": "Admin User",
140 | "associations": [
141 | {
142 | "bindingType": "OWNERSHIP",
143 | "entity": {
144 | "entityId": "server-789",
145 | "primaryDisplayName": "Test Server",
146 | },
147 | }
148 | ],
149 | }
150 | ]
151 | }
152 | }
153 | },
154 | },
155 | # Risk assessment response
156 | {
157 | "status_code": 200,
158 | "body": {
159 | "data": {
160 | "entities": {
161 | "nodes": [
162 | {
163 | "entityId": "test-entity-456",
164 | "primaryDisplayName": "Admin User",
165 | "riskScore": 85,
166 | "riskScoreSeverity": "HIGH",
167 | "riskFactors": [
168 | {
169 | "type": "PRIVILEGED_ACCESS",
170 | "severity": "HIGH",
171 | }
172 | ],
173 | }
174 | ]
175 | }
176 | }
177 | },
178 | },
179 | ]
180 | self.mock_client.command.side_effect = mock_responses
181 |
182 | # Call investigate_entity with multiple investigation types
183 | result = self.module.investigate_entity(
184 | email_addresses=["[email protected]"],
185 | investigation_types=[
186 | "entity_details",
187 | "timeline_analysis",
188 | "relationship_analysis",
189 | "risk_assessment",
190 | ],
191 | limit=50,
192 | include_associations=True,
193 | include_accounts=True,
194 | include_incidents=True,
195 | )
196 |
197 | # Verify multiple client commands were called
198 | self.assertGreaterEqual(self.mock_client.command.call_count, 2)
199 |
200 | # Verify result structure contains all investigation types
201 | self.assertIn("investigation_summary", result)
202 | self.assertIn("entity_details", result)
203 | self.assertIn("timeline_analysis", result)
204 | self.assertIn("relationship_analysis", result)
205 | self.assertIn("risk_assessment", result)
206 |
207 | # Verify investigation summary
208 | self.assertEqual(result["investigation_summary"]["status"], "completed")
209 | self.assertGreater(result["investigation_summary"]["entity_count"], 0)
210 | self.assertEqual(len(result["investigation_summary"]["investigation_types"]), 4)
211 |
212 | def test_investigate_entity_no_identifiers_error(self):
213 | """Test error handling when no entity identifiers are provided."""
214 | # Call investigate_entity without any identifiers
215 | result = self.module.investigate_entity()
216 |
217 | # Verify error response
218 | self.assertIn("error", result)
219 | self.assertIn("investigation_summary", result)
220 | self.assertEqual(result["investigation_summary"]["status"], "failed")
221 | self.assertEqual(result["investigation_summary"]["entity_count"], 0)
222 |
223 | # Verify no API calls were made
224 | self.assertFalse(self.mock_client.command.called)
225 |
226 | def test_investigate_entity_no_entities_found(self):
227 | """Test handling when no entities are found matching criteria."""
228 | # Setup mock response with no entities
229 | mock_response = {
230 | "status_code": 200,
231 | "body": {"data": {"entities": {"nodes": []}}},
232 | }
233 | self.mock_client.command.return_value = mock_response
234 |
235 | # Call investigate_entity
236 | result = self.module.investigate_entity(entity_names=["NonExistent User"])
237 |
238 | # Verify result indicates no entities found
239 | self.assertIn("error", result)
240 | self.assertIn("investigation_summary", result)
241 | self.assertEqual(result["investigation_summary"]["status"], "failed")
242 | self.assertEqual(result["investigation_summary"]["entity_count"], 0)
243 | self.assertIn("search_criteria", result)
244 |
245 |
    def test_investigate_entity_with_geographic_location_data(self):
        """Test entity investigation includes geographic location data in timeline analysis."""
        # Setup mock GraphQL responses with geographic location data.
        # NOTE: side_effect consumes these in order — entity resolution first,
        # then the timeline query — so the ordering of this list is load-bearing.
        mock_responses = [
            # Entity resolution response
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "entities": {
                            "nodes": [
                                {
                                    "entityId": "test-entity-geo-123",
                                    "primaryDisplayName": "Global User",
                                    "secondaryDisplayName": "[email protected]",
                                }
                            ]
                        }
                    }
                },
            },
            # Timeline response with geographic location data
            # (two AUTHENTICATION events from different countries).
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "timeline": {
                            "nodes": [
                                {
                                    "eventId": "auth-event-123",
                                    "eventType": "AUTHENTICATION",
                                    "eventSeverity": "MEDIUM",
                                    "timestamp": "2024-01-01T12:00:00Z",
                                    "sourceEntity": {
                                        "entityId": "test-entity-geo-123",
                                        "primaryDisplayName": "Global User",
                                    },
                                    "targetEntity": {
                                        "entityId": "server-456",
                                        "primaryDisplayName": "Corporate Server",
                                    },
                                    "geoLocation": {
                                        "country": "United States",
                                        "countryCode": "US",
                                        "city": "New York",
                                        "cityCode": "NYC",
                                        "latitude": 40.7128,
                                        "longitude": -74.0060,
                                    },
                                    "locationAssociatedWithUser": True,
                                    "userDisplayName": "Global User",
                                    "endpointDisplayName": "NYC-Workstation-01",
                                    "ipAddress": "192.168.1.100",
                                },
                                {
                                    "eventId": "auth-event-456",
                                    "eventType": "AUTHENTICATION",
                                    "eventSeverity": "HIGH",
                                    "timestamp": "2024-01-02T08:30:00Z",
                                    "sourceEntity": {
                                        "entityId": "test-entity-geo-123",
                                        "primaryDisplayName": "Global User",
                                    },
                                    "targetEntity": {
                                        "entityId": "server-789",
                                        "primaryDisplayName": "Remote Server",
                                    },
                                    "geoLocation": {
                                        "country": "Germany",
                                        "countryCode": "DE",
                                        "city": "Berlin",
                                        "cityCode": "BER",
                                        "latitude": 52.5200,
                                        "longitude": 13.4050,
                                    },
                                    "locationAssociatedWithUser": True,
                                    "userDisplayName": "Global User",
                                    "endpointDisplayName": "BER-Laptop-02",
                                    "ipAddress": "10.0.0.50",
                                },
                            ],
                            "pageInfo": {"hasNextPage": False},
                        }
                    }
                },
            },
        ]
        self.mock_client.command.side_effect = mock_responses

        # Call investigate_entity with timeline analysis to get geographic data
        result = self.module.investigate_entity(
            entity_names=["Global User"],
            investigation_types=["timeline_analysis"],
            timeline_start_time="2024-01-01T00:00:00Z",
            timeline_end_time="2024-01-02T23:59:59Z",
            limit=50,
        )

        # Verify result structure
        self.assertIn("investigation_summary", result)
        self.assertIn("timeline_analysis", result)
        self.assertEqual(result["investigation_summary"]["status"], "completed")

        # Verify geographic location data is present in timeline events
        timeline_data = result["timeline_analysis"]["timelines"][0]["timeline"]
        self.assertGreater(len(timeline_data), 0)

        # Check first event has geographic location data
        first_event = timeline_data[0]
        self.assertIn("geoLocation", first_event)
        self.assertIn("country", first_event["geoLocation"])
        self.assertIn("countryCode", first_event["geoLocation"])
        self.assertIn("city", first_event["geoLocation"])
        self.assertIn("cityCode", first_event["geoLocation"])
        self.assertIn("latitude", first_event["geoLocation"])
        self.assertIn("longitude", first_event["geoLocation"])

        # Verify geographic location values
        self.assertEqual(first_event["geoLocation"]["country"], "United States")
        self.assertEqual(first_event["geoLocation"]["countryCode"], "US")
        self.assertEqual(first_event["geoLocation"]["city"], "New York")
        self.assertEqual(first_event["geoLocation"]["cityCode"], "NYC")

        # Check additional location fields
        self.assertIn("locationAssociatedWithUser", first_event)
        self.assertIn("userDisplayName", first_event)
        self.assertIn("endpointDisplayName", first_event)
        self.assertIn("ipAddress", first_event)

        # Verify second event has different country (multi-location user)
        second_event = timeline_data[1]
        self.assertEqual(second_event["geoLocation"]["country"], "Germany")
        self.assertEqual(second_event["geoLocation"]["countryCode"], "DE")
379 |
    def test_investigate_entity_with_geo_location_associations(self):
        """Test entity investigation includes geographic location associations."""
        # Setup mock GraphQL responses with geographic location associations.
        # NOTE: side_effect order matters — resolution first, then entity details.
        mock_responses = [
            # Entity resolution response
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "entities": {
                            "nodes": [
                                {
                                    "entityId": "test-entity-assoc-456",
                                    "primaryDisplayName": "Travel User",
                                    "secondaryDisplayName": "[email protected]",
                                }
                            ]
                        }
                    }
                },
            },
            # Entity details response with geographic associations
            # (two LOCATION_ACCESS bindings carrying geoLocation payloads).
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "entities": {
                            "nodes": [
                                {
                                    "entityId": "test-entity-assoc-456",
                                    "primaryDisplayName": "Travel User",
                                    "secondaryDisplayName": "[email protected]",
                                    "type": "USER",
                                    "riskScore": 60,
                                    "riskScoreSeverity": "MEDIUM",
                                    "associations": [
                                        {
                                            "bindingType": "LOCATION_ACCESS",
                                            "geoLocation": {
                                                "country": "France",
                                                "countryCode": "FR",
                                                "city": "Paris",
                                                "cityCode": "PAR",
                                                "latitude": 48.8566,
                                                "longitude": 2.3522,
                                            },
                                        },
                                        {
                                            "bindingType": "LOCATION_ACCESS",
                                            "geoLocation": {
                                                "country": "Japan",
                                                "countryCode": "JP",
                                                "city": "Tokyo",
                                                "cityCode": "TYO",
                                                "latitude": 35.6762,
                                                "longitude": 139.6503,
                                            },
                                        },
                                    ],
                                }
                            ]
                        }
                    }
                },
            },
        ]
        self.mock_client.command.side_effect = mock_responses

        # Call investigate_entity with entity details to get geographic associations
        result = self.module.investigate_entity(
            entity_names=["Travel User"],
            investigation_types=["entity_details"],
            include_associations=True,
            limit=50,
        )

        # Verify result structure
        self.assertIn("investigation_summary", result)
        self.assertIn("entity_details", result)
        self.assertEqual(result["investigation_summary"]["status"], "completed")

        # Verify geographic location associations are present
        entity_data = result["entity_details"]["entities"][0]
        self.assertIn("associations", entity_data)
        associations = entity_data["associations"]
        self.assertGreater(len(associations), 0)

        # Check geographic location associations
        geo_associations = [
            assoc for assoc in associations if "geoLocation" in assoc
        ]
        self.assertGreater(len(geo_associations), 0)

        # Verify first geographic association
        first_geo_assoc = geo_associations[0]
        self.assertIn("geoLocation", first_geo_assoc)
        geo_location = first_geo_assoc["geoLocation"]
        self.assertIn("country", geo_location)
        self.assertIn("countryCode", geo_location)
        self.assertIn("city", geo_location)
        self.assertIn("cityCode", geo_location)
        self.assertIn("latitude", geo_location)
        self.assertIn("longitude", geo_location)

        # Verify geographic location values (first association is France)
        self.assertEqual(geo_location["country"], "France")
        self.assertEqual(geo_location["countryCode"], "FR")
487 |
    def test_investigate_entity_multi_country_detection(self):
        """Test detection of users active in multiple countries."""
        # Setup mock GraphQL responses simulating user activity in 4+ countries.
        # NOTE: side_effect order matters — resolution first, then the timeline.
        mock_responses = [
            # Entity resolution response
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "entities": {
                            "nodes": [
                                {
                                    "entityId": "multi-country-user-789",
                                    "primaryDisplayName": "Global Executive",
                                    "secondaryDisplayName": "[email protected]",
                                }
                            ]
                        }
                    }
                },
            },
            # Timeline response with activities from multiple countries
            # (US, GB, SG, AU — one successful authentication each).
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "timeline": {
                            "nodes": [
                                {
                                    "eventId": "event-us-001",
                                    "eventType": "SUCCESSFUL_AUTHENTICATION",
                                    "timestamp": "2024-01-01T09:00:00Z",
                                    "geoLocation": {
                                        "country": "United States",
                                        "countryCode": "US",
                                        "city": "San Francisco",
                                        "cityCode": "SFO",
                                    },
                                    "locationAssociatedWithUser": True,
                                },
                                {
                                    "eventId": "event-uk-002",
                                    "eventType": "SUCCESSFUL_AUTHENTICATION",
                                    "timestamp": "2024-01-02T14:30:00Z",
                                    "geoLocation": {
                                        "country": "United Kingdom",
                                        "countryCode": "GB",
                                        "city": "London",
                                        "cityCode": "LDN",
                                    },
                                    "locationAssociatedWithUser": True,
                                },
                                {
                                    "eventId": "event-sg-003",
                                    "eventType": "SUCCESSFUL_AUTHENTICATION",
                                    "timestamp": "2024-01-03T22:15:00Z",
                                    "geoLocation": {
                                        "country": "Singapore",
                                        "countryCode": "SG",
                                        "city": "Singapore",
                                        "cityCode": "SIN",
                                    },
                                    "locationAssociatedWithUser": True,
                                },
                                {
                                    "eventId": "event-au-004",
                                    "eventType": "SUCCESSFUL_AUTHENTICATION",
                                    "timestamp": "2024-01-04T05:45:00Z",
                                    "geoLocation": {
                                        "country": "Australia",
                                        "countryCode": "AU",
                                        "city": "Sydney",
                                        "cityCode": "SYD",
                                    },
                                    "locationAssociatedWithUser": True,
                                },
                            ],
                            "pageInfo": {"hasNextPage": False},
                        }
                    }
                },
            },
        ]
        self.mock_client.command.side_effect = mock_responses

        # Call investigate_entity with timeline analysis
        result = self.module.investigate_entity(
            entity_names=["Global Executive"],
            investigation_types=["timeline_analysis"],
            timeline_start_time="2024-01-01T00:00:00Z",
            timeline_end_time="2024-01-04T23:59:59Z",
            limit=100,
        )

        # Verify result structure
        self.assertIn("timeline_analysis", result)
        timeline_events = result["timeline_analysis"]["timelines"][0]["timeline"]
        self.assertEqual(len(timeline_events), 4)

        # Extract unique countries from timeline events
        countries = set()
        for event in timeline_events:
            if "geoLocation" in event and "country" in event["geoLocation"]:
                countries.add(event["geoLocation"]["country"])

        # Verify user has been active in 4 different countries
        expected_countries = {"United States", "United Kingdom", "Singapore", "Australia"}
        self.assertEqual(countries, expected_countries)
        self.assertEqual(len(countries), 4)

        # Verify each event has proper geographic location structure
        for event in timeline_events:
            self.assertIn("geoLocation", event)
            geo_loc = event["geoLocation"]
            self.assertIn("country", geo_loc)
            self.assertIn("countryCode", geo_loc)
            self.assertIn("city", geo_loc)
            self.assertIn("cityCode", geo_loc)
            self.assertTrue(event.get("locationAssociatedWithUser", False))
607 |
    def test_investigate_entity_file_operation_geographic_data(self):
        """Test geographic location data in file operation events (targetEntity only)."""
        # Setup mock response for file operation event with geographic data.
        # NOTE: side_effect order matters — resolution first, then the timeline.
        mock_responses = [
            # Entity resolution response
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "entities": {
                            "nodes": [
                                {
                                    "entityId": "file-user-123",
                                    "primaryDisplayName": "File User",
                                }
                            ]
                        }
                    }
                },
            },
            # Timeline response with file operation event.
            # FILE_OPERATION events intentionally carry no sourceEntity —
            # only targetEntity plus the geo/location context fields.
            {
                "status_code": 200,
                "body": {
                    "data": {
                        "timeline": {
                            "nodes": [
                                {
                                    "eventId": "file-op-001",
                                    "eventType": "FILE_OPERATION",
                                    "timestamp": "2024-01-01T15:30:00Z",
                                    "targetEntity": {
                                        "entityId": "file-server-456",
                                        "primaryDisplayName": "Shared File Server",
                                    },
                                    "geoLocation": {
                                        "country": "Canada",
                                        "countryCode": "CA",
                                        "city": "Toronto",
                                        "cityCode": "YYZ",
                                        "latitude": 43.6532,
                                        "longitude": -79.3832,
                                    },
                                    "locationAssociatedWithUser": True,
                                    "userDisplayName": "File User",
                                    "endpointDisplayName": "TOR-Desktop-01",
                                    "ipAddress": "172.16.0.25",
                                }
                            ],
                            "pageInfo": {"hasNextPage": False},
                        }
                    }
                },
            },
        ]
        self.mock_client.command.side_effect = mock_responses

        # Call investigate_entity with timeline analysis
        result = self.module.investigate_entity(
            entity_names=["File User"],
            investigation_types=["timeline_analysis"],
            limit=50,
        )

        # Verify file operation event has geographic data
        timeline_events = result["timeline_analysis"]["timelines"][0]["timeline"]
        file_event = timeline_events[0]

        # Verify event structure (should have targetEntity but no sourceEntity for file operations)
        self.assertIn("targetEntity", file_event)
        self.assertNotIn("sourceEntity", file_event)

        # Verify geographic location data is present
        self.assertIn("geoLocation", file_event)
        geo_loc = file_event["geoLocation"]
        self.assertEqual(geo_loc["country"], "Canada")
        self.assertEqual(geo_loc["countryCode"], "CA")
        self.assertEqual(geo_loc["city"], "Toronto")

        # Verify additional location context
        self.assertIn("locationAssociatedWithUser", file_event)
        self.assertIn("userDisplayName", file_event)
        self.assertIn("endpointDisplayName", file_event)
        self.assertIn("ipAddress", file_event)
692 |
693 |
# Allow running this test module directly (e.g. `python tests/.../test_idp.py`).
if __name__ == "__main__":
    unittest.main()
696 |
```
--------------------------------------------------------------------------------
/falcon_mcp/modules/idp.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Identity Protection (IDP) module for Falcon MCP Server
3 |
4 | This module provides tools for accessing and managing CrowdStrike Falcon Identity Protection capabilities.
5 | Core use cases:
6 | 1. Entity Lookup & Investigation
7 | """
8 |
9 | import json
10 | from datetime import datetime
11 | from typing import Any, Dict, List
12 |
13 | from mcp.server import FastMCP
14 | from pydantic import Field
15 |
16 | from falcon_mcp.common.errors import handle_api_response
17 | from falcon_mcp.common.logging import get_logger
18 | from falcon_mcp.common.utils import sanitize_input
19 | from falcon_mcp.modules.base import BaseModule
20 |
21 | logger = get_logger(__name__)
22 |
23 |
24 | class IdpModule(BaseModule):
25 | """Module for accessing and managing CrowdStrike Falcon Identity Protection."""
26 |
27 | def register_tools(self, server: FastMCP) -> None:
28 | """Register IDP tools with the MCP server.
29 |
30 | Args:
31 | server: MCP server instance
32 | """
33 | # Entity Investigation Tool
34 | self._add_tool(
35 | server=server,
36 | method=self.investigate_entity,
37 | name="idp_investigate_entity",
38 | )
39 |
40 | # ==========================================
41 | # Entity Investigation Tool
42 | # ==========================================
43 |
44 | def investigate_entity(
45 | self,
46 | # Entity Identification (Required - at least one)
47 | entity_ids: list[str] | None = Field(
48 | default=None,
49 | description="List of specific entity IDs to investigate (e.g., ['entity-001'])",
50 | ),
51 | entity_names: list[str] | None = Field(
52 | default=None,
53 | description="List of entity names to search for (e.g., ['Administrator', 'John Doe']). When combined with other parameters, uses AND logic.",
54 | ),
55 | email_addresses: list[str] | None = Field(
56 | default=None,
57 | description="List of email addresses to investigate (e.g., ['[email protected]']). When combined with other parameters, uses AND logic.",
58 | ),
59 | ip_addresses: list[str] | None = Field(
60 | default=None,
61 | description="List of IP addresses/endpoints to investigate (e.g., ['1.1.1.1']). When combined with other parameters, uses AND logic.",
62 | ),
63 | domain_names: list[str] | None = Field(
64 | default=None,
65 | description="List of domain names to search for (e.g., ['XDRHOLDINGS.COM', 'CORP.LOCAL']). When combined with other parameters, uses AND logic. Example: entity_names=['Administrator'] + domain_names=['DOMAIN.COM'] finds Administrator user in that specific domain.",
66 | ),
67 | # Investigation Scope Control
68 | investigation_types: list[str] = Field(
69 | default=["entity_details"],
70 | description="Types of investigation to perform: 'entity_details', 'timeline_analysis', 'relationship_analysis', 'risk_assessment'. Use multiple for comprehensive analysis.",
71 | ),
72 | # Timeline Parameters (when timeline_analysis is included)
73 | timeline_start_time: str | None = Field(
74 | default=None,
75 | description="Start time for timeline analysis in ISO format (e.g., '2024-01-01T00:00:00Z')",
76 | ),
77 | timeline_end_time: str | None = Field(
78 | default=None,
79 | description="End time for timeline analysis in ISO format",
80 | ),
81 | timeline_event_types: list[str] | None = Field(
82 | default=None,
83 | description="Filter timeline by event types: 'ACTIVITY', 'NOTIFICATION', 'THREAT', 'ENTITY', 'AUDIT', 'POLICY', 'SYSTEM'",
84 | ),
85 | # Relationship Parameters (when relationship_analysis is included)
86 | relationship_depth: int = Field(
87 | default=2,
88 | ge=1,
89 | le=3,
90 | description="Depth of relationship analysis (1-3 levels)",
91 | ),
92 | # General Parameters
93 | limit: int = Field(
94 | default=10,
95 | ge=1,
96 | le=200,
97 | description="Maximum number of results to return",
98 | ),
99 | include_associations: bool = Field(
100 | default=True,
101 | description="Include entity associations and relationships in results",
102 | ),
103 | include_accounts: bool = Field(
104 | default=True,
105 | description="Include account information in results",
106 | ),
107 | include_incidents: bool = Field(
108 | default=True,
109 | description="Include open security incidents in results",
110 | ),
111 | ) -> Dict[str, Any]:
112 | """Comprehensive entity investigation tool.
113 |
114 | This tool provides complete entity investigation capabilities including:
115 | - Entity search and details lookup
116 | - Activity timeline analysis
117 | - Relationship and association mapping
118 | - Risk assessment
119 | """
120 | logger.debug("Starting comprehensive entity investigation")
121 |
122 | # Step 1: Validate inputs
123 | validation_error = self._validate_entity_identifiers(
124 | entity_ids,
125 | entity_names,
126 | email_addresses,
127 | ip_addresses,
128 | domain_names,
129 | investigation_types,
130 | )
131 | if validation_error:
132 | return validation_error
133 |
134 | # Step 2: Entity Resolution - Find entities from various identifiers
135 | logger.debug("Resolving entities from provided identifiers")
136 | search_criteria = {
137 | "entity_ids": entity_ids,
138 | "entity_names": entity_names,
139 | "email_addresses": email_addresses,
140 | "ip_addresses": ip_addresses,
141 | "domain_names": domain_names,
142 | }
143 |
144 | resolved_entity_ids = self._resolve_entities(
145 | {
146 | "entity_ids": entity_ids if entity_ids is not None else None,
147 | "entity_names": entity_names if entity_names is not None else None,
148 | "email_addresses": email_addresses if email_addresses is not None else None,
149 | "ip_addresses": ip_addresses if ip_addresses is not None else None,
150 | "domain_names": domain_names if domain_names is not None else None,
151 | "limit": limit,
152 | }
153 | )
154 |
155 | # Check if entity resolution failed
156 | if isinstance(resolved_entity_ids, dict) and "error" in resolved_entity_ids:
157 | return self._create_error_response(
158 | resolved_entity_ids["error"],
159 | 0,
160 | investigation_types,
161 | search_criteria,
162 | )
163 |
164 | if not resolved_entity_ids:
165 | return self._create_error_response(
166 | "No entities found matching the provided criteria",
167 | 0,
168 | investigation_types,
169 | search_criteria,
170 | )
171 |
172 | logger.debug(f"Resolved {len(resolved_entity_ids)} entities for investigation")
173 |
174 | # Step 3: Execute investigations based on requested types
175 | investigation_results = {}
176 | investigation_params = {
177 | "include_associations": include_associations,
178 | "include_accounts": include_accounts,
179 | "include_incidents": include_incidents,
180 | "timeline_start_time": timeline_start_time,
181 | "timeline_end_time": timeline_end_time,
182 | "timeline_event_types": timeline_event_types,
183 | "relationship_depth": relationship_depth,
184 | "limit": limit,
185 | }
186 |
187 | for investigation_type in investigation_types:
188 | result = self._execute_single_investigation(
189 | investigation_type, resolved_entity_ids, investigation_params
190 | )
191 | if "error" in result:
192 | logger.error(f"Error in {investigation_type} investigation: {result['error']}")
193 | return self._create_error_response(
194 | f"Investigation failed during {investigation_type}: {result['error']}",
195 | len(resolved_entity_ids),
196 | investigation_types,
197 | )
198 | investigation_results[investigation_type] = result
199 |
200 | # Step 4: Synthesize comprehensive response
201 | return self._synthesize_investigation_response(
202 | resolved_entity_ids,
203 | investigation_results,
204 | {
205 | "investigation_types": investigation_types,
206 | "search_criteria": search_criteria,
207 | },
208 | )
209 |
210 | # ==========================================
211 | # Investigation Helper Methods
212 | # ==========================================
213 |
214 | def _validate_entity_identifiers(
215 | self,
216 | entity_ids,
217 | entity_names,
218 | email_addresses,
219 | ip_addresses,
220 | domain_names,
221 | investigation_types,
222 | ):
223 | """Validate that at least one entity identifier is provided."""
224 | if not any(
225 | [
226 | entity_ids,
227 | entity_names,
228 | email_addresses,
229 | ip_addresses,
230 | domain_names,
231 | ]
232 | ):
233 | return {
234 | "error": "At least one entity identifier must be provided (entity_ids, entity_names, email_addresses, ip_addresses, or domain_names)",
235 | "investigation_summary": {
236 | "entity_count": 0,
237 | "investigation_types": investigation_types,
238 | "timestamp": datetime.utcnow().isoformat(),
239 | "status": "failed",
240 | },
241 | }
242 | return None
243 |
244 | def _create_error_response(
245 | self,
246 | error_message,
247 | entity_count,
248 | investigation_types,
249 | search_criteria=None,
250 | ):
251 | """Create a standardized error response."""
252 | response = {
253 | "error": error_message,
254 | "investigation_summary": {
255 | "entity_count": entity_count,
256 | "investigation_types": investigation_types,
257 | "timestamp": datetime.utcnow().isoformat(),
258 | "status": "failed",
259 | },
260 | }
261 | if search_criteria:
262 | response["search_criteria"] = search_criteria
263 | return response
264 |
265 | def _execute_single_investigation(
266 | self,
267 | investigation_type,
268 | resolved_entity_ids,
269 | params,
270 | ):
271 | """Execute a single investigation type and return results or error."""
272 | logger.debug(f"Executing {investigation_type} investigation")
273 |
274 | if investigation_type == "entity_details":
275 | return self._get_entity_details_batch(
276 | resolved_entity_ids,
277 | {
278 | "include_associations": params.get("include_associations", True),
279 | "include_accounts": params.get("include_accounts", True),
280 | "include_incidents": params.get("include_incidents", True),
281 | },
282 | )
283 | if investigation_type == "timeline_analysis":
284 | return self._get_entity_timelines_batch(
285 | resolved_entity_ids,
286 | {
287 | "start_time": params.get("timeline_start_time"),
288 | "end_time": params.get("timeline_end_time"),
289 | "event_types": params.get("timeline_event_types"),
290 | "limit": params.get("limit", 50),
291 | },
292 | )
293 | if investigation_type == "relationship_analysis":
294 | return self._analyze_relationships_batch(
295 | resolved_entity_ids,
296 | {
297 | "relationship_depth": params.get("relationship_depth", 2),
298 | "include_risk_context": True,
299 | "limit": params.get("limit", 50),
300 | },
301 | )
302 | if investigation_type == "risk_assessment":
303 | return self._assess_risks_batch(
304 | resolved_entity_ids,
305 | {"include_risk_factors": True},
306 | )
307 |
308 | logger.warning(f"Unknown investigation type: {investigation_type}")
309 | return {"error": f"Unknown investigation type: {investigation_type}"}
310 |
311 | # ==========================================
312 | # GraphQL Query Building Helper Methods
313 | # ==========================================
314 |
    def _build_entity_details_query(
        self,
        entity_ids: List[str],
        include_risk_factors: bool,
        include_associations: bool,
        include_incidents: bool,
        include_accounts: bool,
    ) -> str:
        """Build GraphQL query for detailed entity information.

        Args:
            entity_ids: Entity IDs serialized into the query's entityIds arg.
            include_risk_factors: Add the riskFactors selection.
            include_associations: Add the associations selection (entity,
                local-admin, and geo-location association variants).
            include_incidents: Add the openIncidents selection (first 10).
            include_accounts: Add the accounts selection (AD, SSO, Azure, and
                cloud-service adapter descriptor variants).

        Returns:
            The assembled GraphQL query string. NOTE: whitespace inside the
            triple-quoted fragments below is emitted verbatim into the query,
            so their indentation must not be reflowed.
        """
        # json.dumps of a list of strings doubles as a GraphQL list literal.
        entity_ids_json = json.dumps(entity_ids)

        # Start with minimal safe fields
        fields = [
            "entityId",
            "primaryDisplayName",
            "secondaryDisplayName",
            "type",
            "riskScore",
            "riskScoreSeverity",
        ]

        if include_risk_factors:
            fields.append("""
                riskFactors {
                    type
                    severity
                }
            """)

        if include_associations:
            # Inline fragments cover each concrete association type the API
            # may return for the shared `associations` interface.
            fields.append("""
                associations {
                    bindingType
                    ... on EntityAssociation {
                        entity {
                            entityId
                            primaryDisplayName
                            secondaryDisplayName
                            type
                        }
                    }
                    ... on LocalAdminLocalUserAssociation {
                        accountName
                    }
                    ... on LocalAdminDomainEntityAssociation {
                        entityType
                        entity {
                            entityId
                            primaryDisplayName
                            secondaryDisplayName
                        }
                    }
                    ... on GeoLocationAssociation {
                        geoLocation {
                            country
                            countryCode
                            city
                            cityCode
                            latitude
                            longitude
                        }
                    }
                }
            """)

        if include_incidents:
            fields.append("""
                openIncidents(first: 10) {
                    nodes {
                        type
                        startTime
                        endTime
                        compromisedEntities {
                            entityId
                            primaryDisplayName
                        }
                    }
                }
            """)

        if include_accounts:
            # Inline fragments for each account descriptor type.
            fields.append("""
                accounts {
                    ... on ActiveDirectoryAccountDescriptor {
                        domain
                        samAccountName
                        ou
                        servicePrincipalNames
                        passwordAttributes {
                            lastChange
                            strength
                        }
                        expirationTime
                    }
                    ... on SsoUserAccountDescriptor {
                        dataSource
                        mostRecentActivity
                        title
                        creationTime
                        passwordAttributes {
                            lastChange
                        }
                    }
                    ... on AzureCloudServiceAdapterDescriptor {
                        registeredTenantType
                        appOwnerOrganizationId
                        publisherDomain
                        signInAudience
                    }
                    ... on CloudServiceAdapterDescriptor {
                        dataSourceParticipantIdentifier
                    }
                }
            """)

        fields_string = "\n".join(fields)

        # NOTE(review): `first: 50` caps the page size regardless of how many
        # entity_ids were supplied — confirm callers never pass more than 50.
        return f"""
        query {{
            entities(entityIds: {entity_ids_json}, first: 50) {{
                nodes {{
                    {fields_string}
                }}
            }}
        }}
        """
441 |
442 | def _build_timeline_query(
443 | self,
444 | entity_id: str,
445 | start_time: str | None,
446 | end_time: str | None,
447 | event_types: list[str] | None,
448 | limit: int,
449 | ) -> str:
450 | """Build GraphQL query for entity timeline."""
451 | filters = [f'sourceEntityQuery: {{entityIds: ["{entity_id}"]}}']
452 |
453 | if start_time and isinstance(start_time, str):
454 | filters.append(f'startTime: "{start_time}"')
455 | if end_time and isinstance(end_time, str):
456 | filters.append(f'endTime: "{end_time}"')
457 | if event_types and isinstance(event_types, list):
458 | # Format event types as unquoted GraphQL enums
459 | categories_str = "[" + ", ".join(event_types) + "]"
460 | filters.append(f"categories: {categories_str}")
461 |
462 | filter_string = ", ".join(filters)
463 |
464 | return f"""
465 | query {{
466 | timeline({filter_string}, first: {limit}) {{
467 | nodes {{
468 | eventId
469 | eventType
470 | eventSeverity
471 | timestamp
472 | ... on TimelineUserOnEndpointActivityEvent {{
473 | sourceEntity {{
474 | entityId
475 | primaryDisplayName
476 | }}
477 | targetEntity {{
478 | entityId
479 | primaryDisplayName
480 | }}
481 | geoLocation {{
482 | country
483 | countryCode
484 | city
485 | cityCode
486 | latitude
487 | longitude
488 | }}
489 | locationAssociatedWithUser
490 | userDisplayName
491 | endpointDisplayName
492 | ipAddress
493 | }}
494 | ... on TimelineAuthenticationEvent {{
495 | sourceEntity {{
496 | entityId
497 | primaryDisplayName
498 | }}
499 | targetEntity {{
500 | entityId
501 | primaryDisplayName
502 | }}
503 | geoLocation {{
504 | country
505 | countryCode
506 | city
507 | cityCode
508 | latitude
509 | longitude
510 | }}
511 | locationAssociatedWithUser
512 | userDisplayName
513 | endpointDisplayName
514 | ipAddress
515 | }}
516 | ... on TimelineAlertEvent {{
517 | sourceEntity {{
518 | entityId
519 | primaryDisplayName
520 | }}
521 | }}
522 | ... on TimelineDceRpcEvent {{
523 | sourceEntity {{
524 | entityId
525 | primaryDisplayName
526 | }}
527 | targetEntity {{
528 | entityId
529 | primaryDisplayName
530 | }}
531 | geoLocation {{
532 | country
533 | countryCode
534 | city
535 | cityCode
536 | latitude
537 | longitude
538 | }}
539 | locationAssociatedWithUser
540 | userDisplayName
541 | endpointDisplayName
542 | ipAddress
543 | }}
544 | ... on TimelineFailedAuthenticationEvent {{
545 | sourceEntity {{
546 | entityId
547 | primaryDisplayName
548 | }}
549 | targetEntity {{
550 | entityId
551 | primaryDisplayName
552 | }}
553 | geoLocation {{
554 | country
555 | countryCode
556 | city
557 | cityCode
558 | latitude
559 | longitude
560 | }}
561 | locationAssociatedWithUser
562 | userDisplayName
563 | endpointDisplayName
564 | ipAddress
565 | }}
566 | ... on TimelineSuccessfulAuthenticationEvent {{
567 | sourceEntity {{
568 | entityId
569 | primaryDisplayName
570 | }}
571 | targetEntity {{
572 | entityId
573 | primaryDisplayName
574 | }}
575 | geoLocation {{
576 | country
577 | countryCode
578 | city
579 | cityCode
580 | latitude
581 | longitude
582 | }}
583 | locationAssociatedWithUser
584 | userDisplayName
585 | endpointDisplayName
586 | ipAddress
587 | }}
588 | ... on TimelineServiceAccessEvent {{
589 | sourceEntity {{
590 | entityId
591 | primaryDisplayName
592 | }}
593 | targetEntity {{
594 | entityId
595 | primaryDisplayName
596 | }}
597 | geoLocation {{
598 | country
599 | countryCode
600 | city
601 | cityCode
602 | latitude
603 | longitude
604 | }}
605 | locationAssociatedWithUser
606 | userDisplayName
607 | endpointDisplayName
608 | ipAddress
609 | }}
610 | ... on TimelineFileOperationEvent {{
611 | targetEntity {{
612 | entityId
613 | primaryDisplayName
614 | }}
615 | geoLocation {{
616 | country
617 | countryCode
618 | city
619 | cityCode
620 | latitude
621 | longitude
622 | }}
623 | locationAssociatedWithUser
624 | userDisplayName
625 | endpointDisplayName
626 | ipAddress
627 | }}
628 | ... on TimelineLdapSearchEvent {{
629 | sourceEntity {{
630 | entityId
631 | primaryDisplayName
632 | }}
633 | targetEntity {{
634 | entityId
635 | primaryDisplayName
636 | }}
637 | geoLocation {{
638 | country
639 | countryCode
640 | city
641 | cityCode
642 | latitude
643 | longitude
644 | }}
645 | locationAssociatedWithUser
646 | userDisplayName
647 | endpointDisplayName
648 | ipAddress
649 | }}
650 | ... on TimelineRemoteCodeExecutionEvent {{
651 | sourceEntity {{
652 | entityId
653 | primaryDisplayName
654 | }}
655 | targetEntity {{
656 | entityId
657 | primaryDisplayName
658 | }}
659 | geoLocation {{
660 | country
661 | countryCode
662 | city
663 | cityCode
664 | latitude
665 | longitude
666 | }}
667 | locationAssociatedWithUser
668 | userDisplayName
669 | endpointDisplayName
670 | ipAddress
671 | }}
672 | ... on TimelineConnectorConfigurationEvent {{
673 | category
674 | }}
675 | ... on TimelineConnectorConfigurationAddedEvent {{
676 | category
677 | }}
678 | ... on TimelineConnectorConfigurationDeletedEvent {{
679 | category
680 | }}
681 | ... on TimelineConnectorConfigurationModifiedEvent {{
682 | category
683 | }}
684 | }}
685 | pageInfo {{
686 | hasNextPage
687 | endCursor
688 | }}
689 | }}
690 | }}
691 | """
692 |
693 | def _build_relationship_analysis_query(
694 | self,
695 | entity_id: str,
696 | relationship_depth: int,
697 | include_risk_context: bool,
698 | limit: int,
699 | ) -> str:
700 | """Build GraphQL query for relationship analysis."""
701 | risk_fields = ""
702 | if include_risk_context:
703 | risk_fields = """
704 | riskScore
705 | riskScoreSeverity
706 | riskFactors {
707 | type
708 | severity
709 | }
710 | """
711 |
712 | # Build nested association fields based on relationship_depth
713 | def build_association_fields(depth: int) -> str:
714 | if depth <= 0:
715 | return ""
716 |
717 | nested_associations = ""
718 | if depth > 1:
719 | nested_associations = build_association_fields(depth - 1)
720 |
721 | return f"""
722 | associations {{
723 | bindingType
724 | ... on EntityAssociation {{
725 | entity {{
726 | entityId
727 | primaryDisplayName
728 | secondaryDisplayName
729 | type
730 | {risk_fields}
731 | {nested_associations}
732 | }}
733 | }}
734 | ... on LocalAdminLocalUserAssociation {{
735 | accountName
736 | }}
737 | ... on LocalAdminDomainEntityAssociation {{
738 | entityType
739 | entity {{
740 | entityId
741 | primaryDisplayName
742 | secondaryDisplayName
743 | type
744 | {risk_fields}
745 | {nested_associations}
746 | }}
747 | }}
748 | ... on GeoLocationAssociation {{
749 | geoLocation {{
750 | country
751 | countryCode
752 | city
753 | cityCode
754 | latitude
755 | longitude
756 | }}
757 | }}
758 | }}
759 | """
760 |
761 | association_fields = build_association_fields(relationship_depth)
762 |
763 | return f"""
764 | query {{
765 | entities(entityIds: ["{entity_id}"], first: {limit}) {{
766 | nodes {{
767 | entityId
768 | primaryDisplayName
769 | secondaryDisplayName
770 | type
771 | {risk_fields}
772 | {association_fields}
773 | }}
774 | }}
775 | }}
776 | """
777 |
778 | def _build_risk_assessment_query(
779 | self, entity_ids: List[str], include_risk_factors: bool
780 | ) -> str:
781 | """Build GraphQL query for risk assessment."""
782 | entity_ids_json = json.dumps(entity_ids)
783 |
784 | risk_fields = """
785 | riskScore
786 | riskScoreSeverity
787 | """
788 |
789 | if include_risk_factors:
790 | risk_fields += """
791 | riskFactors {
792 | type
793 | severity
794 | }
795 | """
796 |
797 | return f"""
798 | query {{
799 | entities(entityIds: {entity_ids_json}, first: 50) {{
800 | nodes {{
801 | entityId
802 | primaryDisplayName
803 | {risk_fields}
804 | }}
805 | }}
806 | }}
807 | """
808 |
    def _resolve_entities(self, identifiers: Dict[str, Any]) -> List[str] | Dict[str, Any]:
        """Resolve entity IDs from various identifier types using unified AND-based query.

        All provided identifiers are combined using AND logic in a single GraphQL query.
        For example: entity_names=["Administrator"] + domain_names=["XDRHOLDINGS.COM"]
        will find entities that match BOTH criteria.

        Args:
            identifiers: Mapping that may carry "entity_ids", "entity_names",
                "email_addresses", "ip_addresses", "domain_names", and "limit".

        Returns:
            List[str]: List of resolved entity IDs on success
            Dict[str, Any]: Error response on failure
        """
        resolved_ids = []

        # Direct entity IDs - no resolution needed
        entity_ids = identifiers.get("entity_ids")
        if entity_ids and isinstance(entity_ids, list):
            resolved_ids.extend(entity_ids)

        # Check if we have conflicting entity types (USER vs ENDPOINT)
        email_addresses = identifiers.get("email_addresses")
        ip_addresses = identifiers.get("ip_addresses")
        has_user_criteria = bool(email_addresses)
        has_endpoint_criteria = bool(ip_addresses)

        # If we have both USER and ENDPOINT criteria, we need separate queries
        if has_user_criteria and has_endpoint_criteria:
            # This is a conflict - cannot search for both USER and ENDPOINT in same query
            # For now, prioritize USER entities (emails) over ENDPOINT entities (IPs)
            logger.warning(
                "Cannot combine email addresses (USER) and IP addresses (ENDPOINT) in single query. Prioritizing USER entities."
            )
            ip_addresses = None

        # Build unified GraphQL query with AND logic.
        # NOTE: the _add_* helpers below mutate both lists in place.
        query_filters = []
        query_fields = []

        # Add entity names filter
        self._add_entity_filters(identifiers, query_fields, query_filters)
        # Add email addresses filter (USER entities)
        self._add_email_filter(email_addresses, query_fields, query_filters)
        # Add IP addresses filter (ENDPOINT entities) - only if no USER criteria
        self._add_ip_filter(has_user_criteria, ip_addresses, query_fields, query_filters)
        # Add domain names filter (returns the names so we can request account
        # context fields below)
        domain_names = self._add_domain_filter(identifiers, query_fields, query_filters)

        # If we have filters to apply, execute unified query
        if query_filters:
            # Remove duplicates from fields (helpers may each request the
            # same display-name fields)
            query_fields = list(set(query_fields))
            fields_string = "\n".join(query_fields)

            # Add account information for domain context
            if domain_names:
                fields_string += """
            accounts {
              ... on ActiveDirectoryAccountDescriptor {
                domain
                samAccountName
              }
            }"""

            filters_string = ", ".join(query_filters)
            limit = identifiers.get("limit", 50)

            # Filters were sanitized/JSON-encoded by the _add_* helpers, so
            # direct interpolation here is assumed safe.
            query = f"""
            query {{
              entities({filters_string}, first: {limit}) {{
                nodes {{
                  entityId
                  {fields_string}
                }}
              }}
            }}
            """

            response = self.client.command("api_preempt_proxy_post_graphql", body={"query": query})
            result = handle_api_response(
                response,
                operation="api_preempt_proxy_post_graphql",
                error_message="Failed to resolve entities with combined filters",
                default_result=None,
            )
            if self._is_error(result):
                return result

            # Extract entities from GraphQL response structure
            data = response.get("body", {}).get("data", {})
            entities = data.get("entities", {}).get("nodes", [])
            resolved_ids.extend([entity["entityId"] for entity in entities])

        # Remove duplicates and return
        # NOTE(review): set() de-duplication does not preserve order, so the
        # returned ID order is unspecified - confirm callers don't rely on it.
        return list(set(resolved_ids))
902 |
903 | def _add_domain_filter(
904 | self,
905 | identifiers,
906 | query_fields,
907 | query_filters,
908 | ):
909 | domain_names = identifiers.get("domain_names")
910 | if domain_names and isinstance(domain_names, list):
911 | sanitized_domains = [sanitize_input(domain) for domain in domain_names]
912 | domains_json = json.dumps(sanitized_domains)
913 | query_filters.append(f"domains: {domains_json}")
914 | query_fields.extend(["primaryDisplayName", "secondaryDisplayName"])
915 | return domain_names
916 |
917 | def _add_ip_filter(
918 | self,
919 | has_user_criteria,
920 | ip_addresses,
921 | query_fields,
922 | query_filters,
923 | ):
924 | if ip_addresses and isinstance(ip_addresses, list) and not has_user_criteria:
925 | sanitized_ips = [sanitize_input(ip) for ip in ip_addresses]
926 | ips_json = json.dumps(sanitized_ips)
927 | query_filters.append(f"primaryDisplayNames: {ips_json}")
928 | query_filters.append("types: [ENDPOINT]")
929 | query_fields.append("primaryDisplayName")
930 |
931 | def _add_email_filter(
932 | self,
933 | email_addresses,
934 | query_fields,
935 | query_filters,
936 | ):
937 | if email_addresses and isinstance(email_addresses, list):
938 | sanitized_emails = [sanitize_input(email) for email in email_addresses]
939 | emails_json = json.dumps(sanitized_emails)
940 | query_filters.append(f"secondaryDisplayNames: {emails_json}")
941 | query_filters.append("types: [USER]")
942 | query_fields.extend(["primaryDisplayName", "secondaryDisplayName"])
943 |
944 | def _add_entity_filters(
945 | self,
946 | identifiers,
947 | query_fields,
948 | query_filters,
949 | ):
950 | entity_names = identifiers.get("entity_names")
951 | if entity_names and isinstance(entity_names, list):
952 | sanitized_names = [sanitize_input(name) for name in entity_names]
953 | names_json = json.dumps(sanitized_names)
954 | query_filters.append(f"primaryDisplayNames: {names_json}")
955 | query_fields.append("primaryDisplayName")
956 |
957 | def _get_entity_details_batch(
958 | self,
959 | entity_ids: List[str],
960 | options: Dict[str, Any],
961 | ) -> Dict[str, Any]:
962 | """Get detailed entity information for multiple entities."""
963 | graphql_query = self._build_entity_details_query(
964 | entity_ids=entity_ids,
965 | include_risk_factors=True,
966 | include_associations=options.get("include_associations", True),
967 | include_incidents=options.get("include_incidents", True),
968 | include_accounts=options.get("include_accounts", True),
969 | )
970 |
971 | response = self.client.command(
972 | "api_preempt_proxy_post_graphql",
973 | body={"query": graphql_query},
974 | )
975 | result = handle_api_response(
976 | response,
977 | operation="api_preempt_proxy_post_graphql",
978 | error_message="Failed to get entity details",
979 | default_result=None,
980 | )
981 | if self._is_error(result):
982 | return result
983 |
984 | # Extract entities from GraphQL response structure
985 | data = response.get("body", {}).get("data", {})
986 | entities = data.get("entities", {}).get("nodes", [])
987 | return {"entities": entities, "entity_count": len(entities)}
988 |
989 | def _get_entity_timelines_batch(
990 | self, entity_ids: List[str], options: Dict[str, Any]
991 | ) -> Dict[str, Any]:
992 | """Get timeline analysis for multiple entities."""
993 | timeline_results = []
994 |
995 | for entity_id in entity_ids:
996 | graphql_query = self._build_timeline_query(
997 | entity_id=entity_id,
998 | start_time=options.get("start_time"),
999 | end_time=options.get("end_time"),
1000 | event_types=options.get("event_types"),
1001 | limit=options.get("limit", 50),
1002 | )
1003 |
1004 | response = self.client.command(
1005 | "api_preempt_proxy_post_graphql",
1006 | body={"query": graphql_query},
1007 | )
1008 | result = handle_api_response(
1009 | response,
1010 | operation="api_preempt_proxy_post_graphql",
1011 | error_message=f"Failed to get timeline for entity '{entity_id}'",
1012 | default_result=None,
1013 | )
1014 | if self._is_error(result):
1015 | return result
1016 |
1017 | # Extract timeline from GraphQL response structure
1018 | data = response.get("body", {}).get("data", {})
1019 | timeline_data = data.get("timeline", {})
1020 | timeline_results.append(
1021 | {
1022 | "entity_id": entity_id,
1023 | "timeline": timeline_data.get("nodes", []),
1024 | "page_info": timeline_data.get("pageInfo", {}),
1025 | }
1026 | )
1027 |
1028 | return {"timelines": timeline_results, "entity_count": len(entity_ids)}
1029 |
1030 | def _analyze_relationships_batch(
1031 | self,
1032 | entity_ids: List[str],
1033 | options: Dict[str, Any],
1034 | ) -> Dict[str, Any]:
1035 | """Analyze relationships for multiple entities."""
1036 | relationship_results = []
1037 |
1038 | for entity_id in entity_ids:
1039 | # Handle FieldInfo objects - extract the actual value
1040 | relationship_depth = options.get("relationship_depth", 2)
1041 | if hasattr(relationship_depth, "default"):
1042 | relationship_depth = relationship_depth.default
1043 |
1044 | graphql_query = self._build_relationship_analysis_query(
1045 | entity_id=entity_id,
1046 | relationship_depth=relationship_depth,
1047 | include_risk_context=options.get("include_risk_context", True),
1048 | limit=options.get("limit", 50),
1049 | )
1050 |
1051 | response = self.client.command(
1052 | "api_preempt_proxy_post_graphql",
1053 | body={"query": graphql_query},
1054 | )
1055 | result = handle_api_response(
1056 | response,
1057 | operation="api_preempt_proxy_post_graphql",
1058 | error_message=f"Failed to analyze relationships for entity '{entity_id}'",
1059 | default_result=None,
1060 | )
1061 | if self._is_error(result):
1062 | return result
1063 |
1064 | # Extract entities from GraphQL response structure
1065 | data = response.get("body", {}).get("data", {})
1066 | entities = data.get("entities", {}).get("nodes", [])
1067 | if entities:
1068 | entity_data = entities[0]
1069 | relationship_results.append(
1070 | {
1071 | "entity_id": entity_id,
1072 | "associations": entity_data.get("associations", []),
1073 | "relationship_count": len(entity_data.get("associations", [])),
1074 | }
1075 | )
1076 | else:
1077 | relationship_results.append(
1078 | {
1079 | "entity_id": entity_id,
1080 | "associations": [],
1081 | "relationship_count": 0,
1082 | }
1083 | )
1084 |
1085 | return {"relationships": relationship_results, "entity_count": len(entity_ids)}
1086 |
1087 | def _assess_risks_batch(
1088 | self,
1089 | entity_ids: List[str],
1090 | options: Dict[str, Any],
1091 | ) -> Dict[str, Any]:
1092 | """Perform risk assessment for multiple entities."""
1093 | graphql_query = self._build_risk_assessment_query(
1094 | entity_ids=entity_ids,
1095 | include_risk_factors=options.get("include_risk_factors", True),
1096 | )
1097 |
1098 | response = self.client.command(
1099 | "api_preempt_proxy_post_graphql",
1100 | body={"query": graphql_query},
1101 | )
1102 | result = handle_api_response(
1103 | response,
1104 | operation="api_preempt_proxy_post_graphql",
1105 | error_message="Failed to assess risks",
1106 | default_result=None,
1107 | )
1108 | if self._is_error(result):
1109 | return result
1110 |
1111 | # Extract entities from GraphQL response structure
1112 | data = response.get("body", {}).get("data", {})
1113 | entities = data.get("entities", {}).get("nodes", [])
1114 | risk_assessments = []
1115 |
1116 | for entity in entities:
1117 | risk_assessments.append(
1118 | {
1119 | "entityId": entity.get("entityId"),
1120 | "primaryDisplayName": entity.get("primaryDisplayName"),
1121 | "riskScore": entity.get("riskScore", 0),
1122 | "riskScoreSeverity": entity.get("riskScoreSeverity", "LOW"),
1123 | "riskFactors": entity.get("riskFactors", []),
1124 | }
1125 | )
1126 |
1127 | return {
1128 | "risk_assessments": risk_assessments,
1129 | "entity_count": len(risk_assessments),
1130 | }
1131 |
1132 | def _synthesize_investigation_response(
1133 | self,
1134 | entity_ids: List[str],
1135 | investigation_results: Dict[str, Any],
1136 | metadata: Dict[str, Any],
1137 | ) -> Dict[str, Any]:
1138 | """Synthesize comprehensive investigation response from multiple API results."""
1139 |
1140 | # Build investigation summary
1141 | investigation_summary = {
1142 | "entity_count": len(entity_ids),
1143 | "resolved_entity_ids": entity_ids,
1144 | "investigation_types": metadata.get("investigation_types", []),
1145 | "timestamp": datetime.utcnow().isoformat(),
1146 | "status": "completed",
1147 | }
1148 |
1149 | # Add search criteria to summary
1150 | search_criteria = metadata.get("search_criteria", {})
1151 | if any(search_criteria.values()):
1152 | investigation_summary["search_criteria"] = search_criteria
1153 |
1154 | # Start building comprehensive response
1155 | response = {
1156 | "investigation_summary": investigation_summary,
1157 | "entities": entity_ids,
1158 | }
1159 |
1160 | # Add investigation results based on what was requested
1161 | for investigation_type, results in investigation_results.items():
1162 | response[investigation_type] = results
1163 |
1164 | # Generate cross-investigation insights
1165 | insights = self._generate_investigation_insights(investigation_results, entity_ids)
1166 | if insights:
1167 | response["cross_investigation_insights"] = insights
1168 |
1169 | return response
1170 |
1171 | def _generate_investigation_insights(
1172 | self,
1173 | investigation_results: Dict[str, Any],
1174 | entity_ids: List[str],
1175 | ) -> Dict[str, Any]:
1176 | """Generate insights by analyzing results across different investigation types."""
1177 | insights = {}
1178 |
1179 | # Timeline and relationship correlation
1180 | if (
1181 | "timeline_analysis" in investigation_results
1182 | and "relationship_analysis" in investigation_results
1183 | ):
1184 | insights["activity_relationship_correlation"] = self._analyze_activity_relationships(
1185 | investigation_results["timeline_analysis"],
1186 | investigation_results["relationship_analysis"],
1187 | )
1188 |
1189 | # Multi-entity patterns (if investigating multiple entities)
1190 | if len(entity_ids) > 1:
1191 | insights["multi_entity_patterns"] = self._analyze_multi_entity_patterns(
1192 | investigation_results, entity_ids
1193 | )
1194 |
1195 | return insights
1196 |
1197 | def _analyze_activity_relationships(
1198 | self,
1199 | timeline_analysis: Dict[str, Any],
1200 | relationship_analysis: Dict[str, Any],
1201 | ) -> Dict[str, Any]:
1202 | """Analyze correlation between timeline activities and entity relationships."""
1203 | correlation = {"related_entity_activities": [], "suspicious_patterns": []}
1204 |
1205 | # This would involve complex analysis of timeline events and relationships
1206 | # For now, provide basic structure
1207 | timelines = timeline_analysis.get("timelines", [])
1208 | relationships = relationship_analysis.get("relationships", [])
1209 |
1210 | correlation["timeline_count"] = len(timelines)
1211 | correlation["relationship_count"] = len(relationships)
1212 |
1213 | return correlation
1214 |
1215 | def _analyze_multi_entity_patterns(
1216 | self,
1217 | investigation_results: Dict[str, Any],
1218 | entity_ids: List[str],
1219 | ) -> Dict[str, Any]:
1220 | """Analyze patterns across multiple entities being investigated."""
1221 | patterns = {
1222 | "common_risk_factors": [],
1223 | "shared_relationships": [],
1224 | "coordinated_activities": [],
1225 | }
1226 |
1227 | # Analyze common risk factors across entities
1228 | if "risk_assessment" in investigation_results:
1229 | risk_assessments = investigation_results["risk_assessment"].get("risk_assessments", [])
1230 | risk_factor_counts = {}
1231 |
1232 | for assessment in risk_assessments:
1233 | for risk_factor in assessment.get("riskFactors", []):
1234 | risk_type = risk_factor.get("type")
1235 | if risk_type in risk_factor_counts:
1236 | risk_factor_counts[risk_type] += 1
1237 | else:
1238 | risk_factor_counts[risk_type] = 1
1239 |
1240 | # Find common risk factors (present in multiple entities)
1241 | for risk_type, count in risk_factor_counts.items():
1242 | if count > 1:
1243 | patterns["common_risk_factors"].append(
1244 | {
1245 | "risk_type": risk_type,
1246 | "entity_count": count,
1247 | "percentage": round((count / len(entity_ids)) * 100, 1),
1248 | }
1249 | )
1250 |
1251 | return patterns
1252 |
```