diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 95c96edb225..859d63bf233 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -169,7 +169,7 @@ Our rules should be written generically when possible. We use [Elastic Common Sc If the relevant [categorization values](https://www.elastic.co/guide/en/ecs/current/ecs-category-field-values-reference.html) are already defined for ECS, we use these to narrow down the event type before adding the query. Typically, the query starts with the broadest grouping possible and gets narrower for each clause. For example, we might write `event.category:process and event.type:start and process.name:net.exe and process.args:group`. First, we match process events with `event.category`, then narrow to creation events with `event.type`. Of the process creation events, we're looking for the process `net.exe` with `process.name` and finally we check the arguments `group` by looking at `process.args`. This flow has little effect on the generated Elasticsearch query, but is the most intuitive to read for rule developers. -Sometimes, it might not make sense for ECS to standardize a field, value, or category. Occasionally, we may encounter fields that specific to a single use-case or vendor. When that happens, we add an exception in [detection_rules/etc/non-ecs-schema.json](detection_rules/etc/non-ecs-schema.json). We automatically detect beats by looking at the index patterns used in a rule. If we see `winlogbeat-*`, for example, then we can validate the rule against ECS + Winlogbeat. When using a particular beat, please use `event.module` and `event.dataset` to make the rule more precise and to better nudge the validation logic. Similar to our logic flow for ECS categorization, we recommend searches progress from `event.module` → `event.dataset` → `event.action` → ``. +Sometimes, it might not make sense for ECS to standardize a field, value, or category. Occasionally, we may encounter fields that are specific to a single use-case or vendor. 
When that happens, we add an exception in [detection_rules/etc/non-ecs-schema.json](detection_rules/etc/non-ecs-schema.json). We automatically detect beats by looking at the index patterns used in a rule. If we see `winlogbeat-*`, for example, then we can validate the rule against ECS + Winlogbeat. When using a particular beat, please use `event.module` and `data_stream.dataset` to make the rule more precise and to better nudge the validation logic. Similar to our logic flow for ECS categorization, we recommend searches progress from `event.module` → `data_stream.dataset` → `event.action` → ``. When a Pull Request is missing a necessary ECS change, please add an issue to [elastic/ecs](https://github.com/elastic/ecs) and link it from the pull request. We don't want to leave PRs blocked for too long, so if the ECS issue isn't progressing, then we can add a note and use the vendor- or beat-specific fields. We'll create another issue, reminding us to update the rule logic to switch to the ECS field when it becomes available. To maximize compatibility, we may add an `or` clause for a release or two to handle the different permutations. After a few releases, we'll remove this and strictly require the ECS fields. 
diff --git a/detection_rules/beats.py b/detection_rules/beats.py index 094bf358c8a..1c31e012dc5 100644 --- a/detection_rules/beats.py +++ b/detection_rules/beats.py @@ -256,7 +256,7 @@ def get_datasets_and_modules(tree: eql.ast.BaseNode | kql.ast.BaseNode) -> tuple modules: set[str] = set() datasets: set[str] = set() - # extract out event.module and event.dataset from the query's AST + # extract out event.module, data_stream.dataset, and event.dataset from the query's AST for node in tree: # type: ignore[reportUnknownVariableType] if ( isinstance(node, eql.ast.Comparison) @@ -265,17 +265,23 @@ def get_datasets_and_modules(tree: eql.ast.BaseNode | kql.ast.BaseNode) -> tuple ): if node.left == eql.ast.Field("event", ["module"]): modules.add(node.right.render()) # type: ignore[reportUnknownMemberType] - elif node.left == eql.ast.Field("event", ["dataset"]): + elif node.left == eql.ast.Field("event", ["dataset"]) or node.left == eql.ast.Field( + "data_stream", ["dataset"] + ): datasets.add(node.right.render()) # type: ignore[reportUnknownMemberType] elif isinstance(node, eql.ast.InSet): if node.expression == eql.ast.Field("event", ["module"]): modules.update(node.get_literals()) # type: ignore[reportUnknownMemberType] - elif node.expression == eql.ast.Field("event", ["dataset"]): + elif node.expression == eql.ast.Field("event", ["dataset"]) or node.expression == eql.ast.Field( + "data_stream", ["dataset"] + ): datasets.update(node.get_literals()) # type: ignore[reportUnknownMemberType] elif isinstance(node, kql.ast.FieldComparison) and node.field == kql.ast.Field("event.module"): # type: ignore[reportUnknownMemberType] modules.update(child.value for child in node.value if isinstance(child, kql.ast.String)) # type: ignore[reportUnknownMemberType, reportUnknownVariableType] elif isinstance(node, kql.ast.FieldComparison) and node.field == kql.ast.Field("event.dataset"): # type: ignore[reportUnknownMemberType] datasets.update(child.value for child in node.value if 
isinstance(child, kql.ast.String)) # type: ignore[reportUnknownMemberType, reportUnknownVariableType] + elif isinstance(node, kql.ast.FieldComparison) and node.field == kql.ast.Field("data_stream.dataset"): # type: ignore[reportUnknownMemberType] + datasets.update(child.value for child in node.value if isinstance(child, kql.ast.String)) # type: ignore[reportUnknownMemberType] return datasets, modules diff --git a/detection_rules/etc/integration-manifests.json.gz b/detection_rules/etc/integration-manifests.json.gz index e4243709647..1229e0ba172 100644 Binary files a/detection_rules/etc/integration-manifests.json.gz and b/detection_rules/etc/integration-manifests.json.gz differ diff --git a/detection_rules/etc/integration-schemas.json.gz b/detection_rules/etc/integration-schemas.json.gz index 7d3b3f375a3..dd3a062b971 100644 Binary files a/detection_rules/etc/integration-schemas.json.gz and b/detection_rules/etc/integration-schemas.json.gz differ diff --git a/detection_rules/rule.py b/detection_rules/rule.py index 0c293141b70..41dc20d059f 100644 --- a/detection_rules/rule.py +++ b/detection_rules/rule.py @@ -1428,7 +1428,7 @@ def get_packaged_integrations( datasets, _ = beats.get_datasets_and_modules(data.get("ast") or []) # type: ignore[reportArgumentType] # integration is None to remove duplicate references upstream in Kibana - # chronologically, event.dataset is checked for package:integration, then rule tags + # chronologically, event.dataset, data_stream.dataset is checked for package:integration, then rule tags # if both exist, rule tags are only used if defined in definitions for non-dataset packages # of machine learning analytic packages diff --git a/detection_rules/rule_validators.py b/detection_rules/rule_validators.py index cbdd7fe2eb2..48e888e22d0 100644 --- a/detection_rules/rule_validators.py +++ b/detection_rules/rule_validators.py @@ -188,7 +188,7 @@ def validate_stack_combos(self, data: QueryRuleData, meta: RuleMeta) -> KQL_ERRO message = 
exc.error_msg trailer = err_trailer if "Unknown field" in message and beat_types: - trailer = f"\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}" + trailer = f"\nTry adding event.module or data_stream.dataset to specify beats module\n\n{trailer}" return kql.KqlParseError( exc.error_msg, # type: ignore[reportUnknownArgumentType] @@ -258,7 +258,7 @@ def validate_integration( # noqa: PLR0912 if exc.error_msg == "Unknown field": field = extract_error_field(self.query, exc) trailer = ( - f"\n\tTry adding event.module or event.dataset to specify integration module\n\t" + f"\n\tTry adding event.module or data_stream.dataset to specify integration module\n\t" f"Will check against integrations {meta.integration} combined.\n\t" f"{package=}, {integration=}, {integration_schema_data['package_version']=}, " f"{integration_schema_data['stack_version']=}, " @@ -512,7 +512,7 @@ def validate_integration( # noqa: PLR0912 if message == "Unknown field" or "Field not recognized" in message: field = extract_error_field(self.query, exc) trailer = ( - f"\n\tTry adding event.module or event.dataset to specify integration module\n\t" + f"\n\tTry adding event.module or data_stream.dataset to specify integration module\n\t" f"Will check against integrations {meta.integration} combined.\n\t" f"{package=}, {integration=}, {package_version=}, " f"{stack_version=}, {ecs_version=}" @@ -571,7 +571,7 @@ def validate_query_with_schema( message = exc.error_msg trailer = err_trailer if "Unknown field" in message and beat_types: - trailer = f"\nTry adding event.module or event.dataset to specify beats module\n\n{trailer}" + trailer = f"\nTry adding event.module or data_stream.dataset to specify beats module\n\n{trailer}" elif "Field not recognized" in message: text_fields = self.text_fields(schema) if text_fields: diff --git a/detection_rules/schemas/definitions.py b/detection_rules/schemas/definitions.py index 0d9317a730a..5414beae148 100644 --- 
a/detection_rules/schemas/definitions.py +++ b/detection_rules/schemas/definitions.py @@ -154,6 +154,7 @@ def validator_wrapper(value: Any) -> Any: "OS: Linux", "OS: macOS", "OS: Windows", + "Promotion: External Alerts", "Rule Type: BBR", "Resources: Investigation Guide", "Rule Type: Higher-Order Rule", diff --git a/pyproject.toml b/pyproject.toml index eaba322bfaa..0d39cc71f70 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "detection_rules" -version = "1.3.15" +version = "1.3.16" description = "Detection Rules is the home for rules used by Elastic Security. This repository is used for the development, maintenance, testing, validation, and release of rules for Elastic Security’s Detection Engine." readme = "README.md" requires-python = ">=3.12" diff --git a/rules/promotions/crowdstrike_external_alerts.toml b/rules/promotions/crowdstrike_external_alerts.toml new file mode 100644 index 00000000000..b1045326041 --- /dev/null +++ b/rules/promotions/crowdstrike_external_alerts.toml @@ -0,0 +1,106 @@ +[metadata] +creation_date = "2025/07/31" +integration = ["crowdstrike"] +maturity = "production" +promotion = true +min_stack_version = "8.18.0" +min_stack_comments = "Introduced support for CrowdStrike alert promotion" +updated_date = "2025/07/31" + +[rule] +author = ["Elastic"] +description = """ +Generates a detection alert for each CrowdStrike alert written to the configured indices. Enabling this rule allows you +to immediately begin investigating CrowdStrike alerts in the app. +""" +from = "now-2m" +index = ["logs-crowdstrike.alert-*"] +interval = "1m" +language = "kuery" +license = "Elastic License v2" +max_signals = 1000 +name = "CrowdStrike External Alerts" +note = """## Triage and analysis + +### Investigating CrowdStrike External Alerts + +CrowdStrike Falcon is a cloud-native endpoint protection platform that delivers real-time threat detection and response capabilities. 
The 'Behavior - Detected - CrowdStrike Alerts' rule captures security alerts generated by Falcon and enables analysts to investigate threats rapidly based on behavioral indicators and threat intelligence. + +### Possible investigation steps + +- Review the associated process, file path, and command line to determine whether the activity is legitimate or suspicious. +- Investigate the user account and host involved in the alert to validate whether the activity was authorized. +- Cross-reference the alert with CrowdStrike Falcon console for additional context, including process tree, behavioral tags, and threat intelligence matches. +- Check for any related alerts from the same host, user, or file hash to identify whether this is part of a larger attack chain. +- Consult the Crowdstrike investigation guide and resources tagged in the alert for specific guidance on handling similar threats. + +### False positive analysis + +- Alerts involving known and trusted software tools (e.g., remote administration tools) may be false positives. Confirm intent before excluding. +- Security assessments or penetration testing activities might mimic real threats. Validate the activity with responsible teams. +- Scheduled jobs, IT scripts, or automation tools may trigger alerts if they behave similarly to malicious code. +- Review alerts based on detection confidence levels and behavioral scoring to filter out low-confidence or known-benign triggers. + +### Response and remediation + +- Isolate affected endpoints to prevent lateral movement if malicious behavior is confirmed. +- Quarantine any identified malicious files and block related hashes or domains. +- Investigate how the threat entered the environment and close any exploited vulnerabilities. +- Reset credentials for compromised user accounts or escalate to incident response. +- Review CrowdStrike Falcon policies and detections to fine-tune future alerting and response coverage. 
+- Document the findings and update detection logic or exceptions accordingly. +""" +references = ["https://docs.elastic.co/en/integrations/crowdstrike"] +risk_score = 47 +rule_id = "aeebe561-c338-4118-9924-8cb4e478aa58" +rule_name_override = "message" +setup = """## Setup + +### CrowdStrike Alert Integration +This rule is designed to capture alert events generated by the CrowdStrike integration and promote them as Elastic detection alerts. + +To capture CrowdStrike alerts, install and configure the CrowdStrike integration to ingest alert events into the `logs-crowdstrike.alert-*` index pattern. + +If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same CrowdStrike events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:crowdstrike.alert to avoid receiving duplicate alerts. + +### Additional notes + +For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts). 
+""" +severity = "medium" +tags = ["Data Source: CrowdStrike", "Use Case: Threat Detection", "Resources: Investigation Guide", "Promotion: External Alerts"] +timestamp_override = "event.ingested" +type = "query" + +query = ''' +event.kind: alert and data_stream.dataset: crowdstrike.alert +''' + +[[rule.risk_score_mapping]] +field = "crowdstrike.alert.incident.score" +operator = "equals" +value = "" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "low" +value = "21" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "medium" +value = "47" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "high" +value = "73" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "critical" +value = "99" diff --git a/rules/promotions/elastic_security_external_alerts.toml b/rules/promotions/elastic_security_external_alerts.toml new file mode 100644 index 00000000000..ec4e5fbaf5b --- /dev/null +++ b/rules/promotions/elastic_security_external_alerts.toml @@ -0,0 +1,110 @@ +[metadata] +creation_date = "2025/07/31" +integration = ["elastic_security"] +maturity = "production" +promotion = true +min_stack_version = "8.18.0" +min_stack_comments = "Introduced support for Elastic Security alert promotion" +updated_date = "2025/07/31" + +[rule] +author = ["Elastic"] +description = """ +Generates a detection alert for each Elastic Security alert written to the configured indices. Enabling this rule +allows you to immediately begin investigating Elastic Security alerts in the app. 
+""" +from = "now-2m" +index = ["logs-elastic_security.alert-*"] +interval = "1m" +language = "kuery" +license = "Elastic License v2" +max_signals = 1000 +name = "Elastic Security External Alerts" +note = """ +## Triage and analysis + +### Investigating Elastic Security External Alerts + +Elastic Security is a comprehensive security platform that provides real-time visibility into your environment, helping you detect and respond to threats effectively. The 'Behavior - Detected - Elastic Security Alerts' rule identifies such threats by monitoring specific alert events, enabling analysts to swiftly investigate and mitigate potential security incidents. + +### Possible investigation steps + +- Correlate the alert with recent activity on the affected endpoint to identify any unusual or suspicious behavior patterns. +- Check for any additional alerts or logs related to the same endpoint or user to determine if this is part of a broader attack or isolated incident. +- Investigate the source and destination IP addresses involved in the alert to assess if they are known to be malicious or associated with previous threats. +- Analyze any files or processes flagged in the alert to determine if they are legitimate or potentially malicious, using threat intelligence sources if necessary. +- Consult the Elastic Security investigation guide and resources tagged in the alert for specific guidance on handling similar threats. + +### False positive analysis + +- Alerts triggered by routine software updates or patches can be false positives. Review the context of the alert to determine if it aligns with scheduled maintenance activities. +- Legitimate administrative tools or scripts may trigger alerts. Identify and whitelist these tools if they are verified as non-threatening. +- Frequent alerts from known safe applications or processes can be excluded by creating exceptions for these specific behaviors in the Elastic Security configuration. 
+- Network scanning or monitoring tools used by IT teams might be flagged. Ensure these tools are documented and excluded from triggering alerts if they are part of regular operations. +- User behavior that is consistent with their role but triggers alerts should be reviewed. If deemed non-malicious, adjust the rule to exclude these specific user actions. + +### Response and remediation + +- Isolate the affected endpoint immediately to prevent lateral movement and further compromise within the network. +- Analyze the specific alert details to identify the nature of the threat and any associated indicators of compromise (IOCs). +- Remove or quarantine any malicious files or processes identified by the Elastic Security alert to neutralize the threat. +- Apply relevant security patches or updates to address any exploited vulnerabilities on the affected endpoint. +- Conduct a thorough scan of the network to identify any additional endpoints that may have been compromised or are exhibiting similar behavior. +- Document the incident and escalate to the appropriate security team or management if the threat is part of a larger attack campaign or if additional resources are needed for remediation. +- Review and update endpoint protection policies and configurations to enhance detection and prevention capabilities against similar threats in the future. +""" +references = ["https://docs.elastic.co/en/integrations/elastic_security"] +risk_score = 47 +rule_id = "720fc1aa-e195-4a1d-81d8-04edfe5313ed" +rule_name_override = "rule.name" +setup = """## Setup + +### Elastic Security Alert Integration +This rule is designed to capture alert events generated by the Elastic Security integration and promote them as Elastic detection alerts. + +To capture Elastic Security alerts, install and configure the Elastic Security integration to ingest alert events into the `logs-elastic_security.alert-*` index pattern. 
+ +If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same Elastic Security events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:elastic_security.alert to avoid receiving duplicate alerts. + +### Additional notes + +For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts). +""" +severity = "medium" +tags = ["Data Source: Elastic Security", "Use Case: Threat Detection", "Resources: Investigation Guide", "Promotion: External Alerts"] +timestamp_override = "event.ingested" +type = "query" + +query = ''' +event.kind: alert and data_stream.dataset: elastic_security.alert +''' + + +[[rule.risk_score_mapping]] +field = "event.risk_score" +operator = "equals" +value = "" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "low" +value = "21" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "medium" +value = "47" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "high" +value = "73" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "critical" +value = "99" diff --git a/rules/promotions/google_secops_external_alerts.toml b/rules/promotions/google_secops_external_alerts.toml new file mode 100644 index 00000000000..06f725134ae --- /dev/null +++ b/rules/promotions/google_secops_external_alerts.toml @@ -0,0 +1,110 @@ +[metadata] +creation_date = "2025/07/31" +integration = ["google_secops"] +maturity = "production" +promotion = true +min_stack_version = "8.18.0" +min_stack_comments = "Introduced support for Google SecOps alert promotion" +updated_date = "2025/07/31" + +[rule] +author = ["Elastic"] +description = """ +Generates a detection alert for each 
Google SecOps alert written to the configured indices. Enabling this rule allows +you to immediately begin investigating Google SecOps alerts in the app. +""" +from = "now-2m" +index = ["logs-google_secops.alert-*"] +interval = "1m" +language = "kuery" +license = "Elastic License v2" +max_signals = 1000 +name = "Google SecOps External Alerts" +note = """Triage and analysis + +### Investigating Google SecOps External Alerts + +Google SecOps provides a robust framework for monitoring and managing security operations within cloud environments. The detection rule leverages specific event identifiers to flag suspicious alerts, enabling analysts to swiftly investigate potential threats and mitigate risks. + +### Possible investigation steps + +- Examine the timeline of events leading up to and following the alert to identify any unusual patterns or activities that may indicate malicious behavior. +- Cross-reference the alert with other security logs and alerts to determine if it is part of a broader attack pattern or isolated incident. +- Investigate the source and destination IP addresses involved in the alert to assess their legitimacy and check for any known malicious activity associated with them. +- Analyze user activity associated with the alert to identify any unauthorized access or privilege escalation attempts. +- Consult the Google SecOps investigation guide and resources tagged in the alert for specific guidance on handling similar threats. + +### False positive analysis + +- Alerts triggered by routine administrative actions can be false positives. Review the context of the alert to determine if it aligns with known maintenance activities. +- Automated scripts or tools that interact with Google SecOps may generate alerts. Identify these scripts and consider creating exceptions for their expected behavior. +- Frequent alerts from specific IP addresses or user accounts that are known to be secure can be excluded by adding them to an allowlist. 
+- Alerts resulting from testing or development environments should be reviewed and, if deemed non-threatening, excluded from triggering further alerts. +- Regularly update and review exception lists to ensure they reflect current non-threatening behaviors and do not inadvertently exclude genuine threats. + +### Response and remediation + +- Immediately isolate affected systems or accounts identified in the Google SecOps alert to prevent further unauthorized access or data exfiltration. +- Conduct a thorough review of the alert details to identify any compromised credentials or access tokens and reset them promptly. +- Implement network segmentation or access control measures to limit the spread of potential threats within the environment. +- Review and update firewall rules and security group settings to block any suspicious IP addresses or domains associated with the alert. +- Escalate the incident to the security operations center (SOC) or incident response team for further analysis and to determine if additional resources are needed. +- Document the incident, including all actions taken, and update incident response plans to incorporate lessons learned from this event. +- Enhance monitoring and detection capabilities by tuning existing alerts and deploying additional rules to detect similar activities in the future. +""" +references = ["https://docs.elastic.co/en/integrations/google_secops"] +risk_score = 47 +rule_id = "70558fd5-6448-4c65-804a-8567ce02c3a2" +rule_name_override = "google_secops.alert.detection.ruleName" +setup = """## Setup + +### Google SecOps Alert Integration +This rule is designed to capture alert events generated by the Google SecOps integration and promote them as Elastic detection alerts. + +To capture Google SecOps alerts, install and configure the Google SecOps integration to ingest alert events into the `logs-google_secops.alert-*` index pattern. 
+ +If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same SecOps events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:google_secops.alert to avoid receiving duplicate alerts. + +### Additional notes + +For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts). +""" +severity = "medium" +tags = ["Data Source: Google SecOps", "Use Case: Threat Detection", "Resources: Investigation Guide", "Promotion: External Alerts"] +timestamp_override = "event.ingested" +type = "query" + +query = ''' +event.kind: alert and data_stream.dataset: google_secops.alert +''' + +[[rule.risk_score_mapping]] +field = "event.risk_score" +operator = "equals" +value = "" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "low" +value = "21" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "medium" +value = "47" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "high" +value = "73" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "critical" +value = "99" + + diff --git a/rules/promotions/microsoft_sentinel_external_alerts.toml b/rules/promotions/microsoft_sentinel_external_alerts.toml new file mode 100644 index 00000000000..651bc23a73a --- /dev/null +++ b/rules/promotions/microsoft_sentinel_external_alerts.toml @@ -0,0 +1,103 @@ +[metadata] +creation_date = "2025/07/31" +integration = ["microsoft_sentinel"] +maturity = "production" +promotion = true +min_stack_version = "8.18.0" +min_stack_comments = "Introduced support for Microsoft Sentinel alert promotion" +updated_date = "2025/07/31" + +[rule] +author = ["Elastic"] +description = """ +Generates a detection alert 
for each Microsoft Sentinel alert written to the configured indices. Enabling this rule +allows you to immediately begin investigating Microsoft Sentinel alerts in the app. +""" +from = "now-2m" +index = ["logs-microsoft_sentinel.alert-*"] +interval = "1m" +language = "kuery" +license = "Elastic License v2" +max_signals = 1000 +name = "Microsoft Sentinel External Alerts" +note = """ Triage and analysis + +## Investigating Microsoft Sentinel External Alerts + +Microsoft Sentinel is a cloud-native SIEM tool that aggregates security data for threat detection and response. The 'Behavior - Detected' rule identifies each alert logged in Sentinel, enabling analysts to swiftly investigate potential threats. + +### Possible investigation steps + +- Examine the timeline of events leading up to the alert to identify any unusual or suspicious activities that may have occurred. +- Cross-reference the alert with other related alerts or logs in Microsoft Sentinel to determine if this is part of a larger pattern or isolated incident. +- Investigate the source and context of the alert to identify any patterns or anomalies that could indicate manipulation or false positives. +- Consult the Microsoft Sentinel investigation guide and resources tagged in the alert for specific guidance on handling similar threats. + +### False positive analysis + +- Alerts triggered by routine administrative tasks can be false positives. Identify these tasks and create exceptions to prevent unnecessary alerts. +- Frequent alerts from known safe IP addresses or domains may not indicate a threat. Whitelist these sources to reduce noise. +- Alerts generated by automated scripts or scheduled tasks that are part of regular operations can be excluded by setting up filters for these specific activities. +- Non-threatening alerts from internal network scans or vulnerability assessments should be reviewed and excluded if they are part of regular security practices. 
+- Alerts from test environments or sandboxed systems can be false positives. Exclude these environments from alert generation to focus on genuine threats. + +### Response and remediation + +- Contain the threat by isolating affected systems from the network to prevent further spread or data exfiltration. +- Review and terminate any suspicious processes or sessions identified in the alert to halt ongoing malicious activities. +- Conduct a thorough analysis of the alert details to identify any compromised accounts or credentials and reset passwords immediately. +- Apply relevant security patches or updates to affected systems to close any vulnerabilities exploited by the adversary. +- Restore affected systems from clean backups to ensure the integrity and security of the environment. +- Monitor network traffic and system logs closely for any signs of recurring or related suspicious activities. +- Escalate the incident to the security operations center (SOC) or incident response team for further investigation and to determine if additional resources are needed. +""" +references = ["https://docs.elastic.co/en/integrations/microsoft_sentinel"] +risk_score = 47 +rule_id = "74147312-ba03-4bea-91d1-040d54c1e8c3" +rule_name_override = "microsoft_sentinel.alert.properties.friendly_name" +setup = """## Setup + +### Microsoft Sentinel Alert Integration +This rule is designed to capture alert events generated by the Microsoft Sentinel integration and promote them as Elastic detection alerts. + +To capture Microsoft Sentinel alerts, install and configure the Microsoft Sentinel integration to ingest alert events into the `logs-microsoft_sentinel.alert-*` index pattern. + +If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same Sentinel events. 
Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:microsoft_sentinel.alert to avoid receiving duplicate alerts. + +### Additional notes + +For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts). +""" +severity = "medium" +tags = ["Data Source: Microsoft Sentinel", "Use Case: Threat Detection", "Resources: Investigation Guide", "Promotion: External Alerts"] +timestamp_override = "event.ingested" +type = "query" + +query = ''' +event.kind: alert and data_stream.dataset: microsoft_sentinel.alert +''' + +[[rule.risk_score_mapping]] +field = "microsoft_sentinel.alert.properties.confidence_score" +operator = "equals" +value = "" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "low" +value = "1" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "medium" +value = "2" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "high" +value = "3" + + diff --git a/rules/promotions/sentinelone_external_alerts.toml b/rules/promotions/sentinelone_external_alerts.toml new file mode 100644 index 00000000000..59ccb47a92d --- /dev/null +++ b/rules/promotions/sentinelone_external_alerts.toml @@ -0,0 +1,111 @@ +[metadata] +creation_date = "2025/07/31" +integration = ["sentinel_one"] +maturity = "production" +promotion = true +min_stack_version = "8.18.0" +min_stack_comments = "Introduced support for SentinelOne alert promotion" +updated_date = "2025/07/31" + +[rule] +author = ["Elastic"] +description = """ +Generates a detection alert for each SentinelOne alert written to the configured indices. Enabling this rule allows you +to immediately begin investigating SentinelOne alerts in the app. 
+""" +from = "now-2m" +index = ["logs-sentinel_one.alert-*"] +interval = "1m" +language = "kuery" +license = "Elastic License v2" +max_signals = 1000 +name = "SentinelOne External Alerts" +note = """## Triage and analysis + +### Investigating SentinelOne External Alerts + +SentinelOne is a cybersecurity platform that provides endpoint protection by detecting and responding to threats in real-time. The 'Behavior - Detected - SentinelOne Alerts' rule identifies such threats by monitoring specific alert events, enabling analysts to swiftly investigate and mitigate potential security incidents. + +### Possible investigation steps + +- Correlate the alert with recent activity on the affected endpoint to identify any unusual or suspicious behavior patterns. +- Check for any additional alerts or logs related to the same endpoint or user to determine if this is part of a broader attack or isolated incident. +- Investigate the source and destination IP addresses involved in the alert to assess if they are known to be malicious or associated with previous threats. +- Analyze any files or processes flagged in the alert to determine if they are legitimate or potentially malicious, using threat intelligence sources if necessary. +- Consult the SentinelOne investigation guide and resources tagged in the alert for specific guidance on handling similar threats. + +### False positive analysis + +- Alerts triggered by routine software updates or patches can be false positives. Review the context of the alert to determine if it aligns with scheduled maintenance activities. +- Legitimate administrative tools or scripts may trigger alerts. Identify and whitelist these tools if they are verified as non-threatening. +- Frequent alerts from known safe applications or processes can be excluded by creating exceptions for these specific behaviors in the SentinelOne configuration. +- Network scanning or monitoring tools used by IT teams might be flagged. 
Ensure these tools are documented and excluded from triggering alerts if they are part of regular operations. +- User behavior that is consistent with their role but triggers alerts should be reviewed. If deemed non-malicious, adjust the rule to exclude these specific user actions. + +### Response and remediation + +- Isolate the affected endpoint immediately to prevent lateral movement and further compromise within the network. +- Analyze the specific alert details to identify the nature of the threat and any associated indicators of compromise (IOCs). +- Remove or quarantine any malicious files or processes identified by the SentinelOne alert to neutralize the threat. +- Apply relevant security patches or updates to address any exploited vulnerabilities on the affected endpoint. +- Conduct a thorough scan of the network to identify any additional endpoints that may have been compromised or are exhibiting similar behavior. +- Document the incident and escalate to the appropriate security team or management if the threat is part of a larger attack campaign or if additional resources are needed for remediation. +- Review and update endpoint protection policies and configurations to enhance detection and prevention capabilities against similar threats in the future. +""" +references = ["https://docs.elastic.co/en/integrations/sentinel_one"] +risk_score = 47 +rule_id = "9b35422b-9102-45a9-8610-2e0c22281c55" +rule_name_override = "sentinel_one.alert.rule.name" +setup = """## Setup + +### SentinelOne Alert Integration +This rule is designed to capture alert events generated by the SentinelOne integration and promote them as Elastic detection alerts. + +To capture SentinelOne alerts, install and configure the SentinelOne integration to ingest alert events into the `logs-sentinel_one.alert-*` index pattern. 
If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same SentinelOne events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset: (sentinel_one.alert or sentinel_one.threat) to avoid receiving duplicate alerts.
Splunk monitors and analyzes data, often used in security environments to track and respond to potential threats. The 'Behavior - Detected - Splunk Alerts' rule identifies such threats by flagging alerts, enabling timely investigation and response.
+- Alerts generated by automated scripts or tools that interact with Splunk for legitimate purposes can be false positives. Review and whitelist these scripts or tools to prevent unnecessary alerts. +- If certain user actions consistently trigger alerts but are verified as non-malicious, consider creating user-specific exceptions to reduce noise and focus on genuine threats. +- Regularly review and update the list of exceptions to ensure they remain relevant and do not inadvertently exclude new or evolving threats. + +### Response and remediation + +- Immediately isolate affected systems to prevent further manipulation of Splunk alerts and potential spread of malicious activity. +- Review and validate the integrity of the Splunk alert indices to ensure no unauthorized changes have been made. +- Restore any compromised Splunk alert configurations from a known good backup to ensure accurate monitoring and alerting. +- Conduct a thorough audit of user access and permissions within Splunk to identify and revoke any unauthorized access. +- Escalate the incident to the security operations center (SOC) for further analysis and to determine if additional systems or data have been affected. +- Implement enhanced monitoring on Splunk indices to detect any future unauthorized changes or suspicious activities. +- Document the incident details and response actions taken for future reference and to improve incident response procedures. +""" +references = ["https://docs.elastic.co/en/integrations/splunk"] +risk_score = 47 +rule_id = "d3b6222f-537e-4b84-956a-3ebae2dcf811" +rule_name_override = "splunk.alert.source" +setup = """## Setup + +### Splunk Alert Integration +This rule is designed to capture alert events generated by the Splunk integration and promote them as Elastic detection alerts. + +To capture Splunk alerts, install and configure the Splunk integration to ingest alert events into the `logs-splunk.alert-*` index pattern. 
+ +If this rule is enabled alongside the External Alerts promotion rule (UUID: eb079c62-4481-4d6e-9643-3ca499df7aaa), you may receive duplicate alerts for the same Splunk events. Consider adding a rule exception for the External Alert rule to exclude data_stream.dataset:splunk.alert to avoid receiving duplicate alerts. + +### Additional notes + +For information on troubleshooting the maximum alerts warning please refer to this [guide](https://www.elastic.co/guide/en/security/current/alerts-ui-monitor.html#troubleshoot-max-alerts). +""" +severity = "medium" +tags = ["Data Source: Splunk", "Use Case: Threat Detection", "Resources: Investigation Guide", "Promotion: External Alerts"] +timestamp_override = "event.ingested" +type = "query" + +query = ''' +event.kind: alert and data_stream.dataset: splunk.alert +''' + + +[[rule.risk_score_mapping]] +field = "event.risk_score" +operator = "equals" +value = "" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "low" +value = "21" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "medium" +value = "47" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "high" +value = "73" + +[[rule.severity_mapping]] +field = "event.severity" +operator = "equals" +severity = "critical" +value = "99" + + diff --git a/tests/test_all_rules.py b/tests/test_all_rules.py index 840753e5c85..5b43989add0 100644 --- a/tests/test_all_rules.py +++ b/tests/test_all_rules.py @@ -510,10 +510,10 @@ def test_investigation_guide_tag(self): def test_tag_prefix(self): """Ensure all tags have a prefix from an expected list.""" invalid = [] - + expected_prefixes = {tag.split(":")[0] + ":" for tag in definitions.EXPECTED_RULE_TAGS} for rule in self.all_rules: rule_tags = rule.contents.data.tags - expected_prefixes = {tag.split(":")[0] + ":" for tag in definitions.EXPECTED_RULE_TAGS} + invalid.extend( [ f"{self.rule_str(rule)}-{tag}"