From d73e0cd1cddc78898fcb7b1a0c845b1c76caa0d2 Mon Sep 17 00:00:00 2001
From: Pepe Fagoaga
Date: Wed, 26 Feb 2025 16:57:35 +0100
Subject: [PATCH 1/5] fix(stats): Use Finding instead of Check_Report

---
 prowler/__main__.py               | 24 ++++++------
 prowler/lib/outputs/outputs.py    | 23 ++++++------
 tests/lib/outputs/outputs_test.py | 62 +++++++++++++++----------------
 3 files changed, 55 insertions(+), 54 deletions(-)

diff --git a/prowler/__main__.py b/prowler/__main__.py
index c01f2a0a56..1e535daea4 100644
--- a/prowler/__main__.py
+++ b/prowler/__main__.py
@@ -305,6 +305,18 @@ def prowler():
         print(f"{Style.BRIGHT}{Fore.GREEN}\nNo findings to fix!{Style.RESET_ALL}\n")
         sys.exit()
 
+    # Outputs
+    # TODO: this part is needed since the checks generates a Check_Report_XXX and the output uses Finding
+    # This will be refactored for the outputs generate directly the Finding
+    finding_outputs = []
+    for finding in findings:
+        try:
+            finding_outputs.append(
+                Finding.generate_output(global_provider, finding, output_options)
+            )
+        except Exception:
+            continue
+
     # Extract findings stats
     stats = extract_findings_statistics(findings)
 
@@ -329,18 +341,6 @@ def prowler():
         )
         sys.exit(1)
 
-    # Outputs
-    # TODO: this part is needed since the checks generates a Check_Report_XXX and the output uses Finding
-    # This will be refactored for the outputs generate directly the Finding
-    finding_outputs = []
-    for finding in findings:
-        try:
-            finding_outputs.append(
-                Finding.generate_output(global_provider, finding, output_options)
-            )
-        except Exception:
-            continue
-
     generated_outputs = {"regular": [], "compliance": []}
 
     if args.output_formats:
diff --git a/prowler/lib/outputs/outputs.py b/prowler/lib/outputs/outputs.py
index b4699157d7..49a1738ed6 100644
--- a/prowler/lib/outputs/outputs.py
+++ b/prowler/lib/outputs/outputs.py
@@ -2,6 +2,7 @@
 
 from prowler.config.config import orange_color
 from prowler.lib.logger import logger
+from prowler.lib.outputs.finding import Finding
 
 
 def stdout_report(finding, color, verbose, status, fix):
@@ -89,7 +90,7 @@ def set_report_color(status: str, muted: bool = False) -> str:
     return color
 
 
-def extract_findings_statistics(findings: list) -> dict:
+def extract_findings_statistics(findings: list[Finding]) -> dict:
     """
     extract_findings_statistics takes a list of findings and returns the following dict with the aggregated statistics
     {
@@ -123,17 +124,17 @@ def extract_findings_statistics(findings: list) -> dict:
     low_severity_fail = 0
 
     for finding in findings:
-        # Save the resource_id
-        resources.add(finding.resource_id)
+        # Save the resource_uid
+        resources.add(finding.resource_uid)
 
         if finding.status == "PASS":
-            if finding.check_metadata.Severity == "critical":
+            if finding.metadata.Severity == "critical":
                 critical_severity_pass += 1
-            if finding.check_metadata.Severity == "high":
+            if finding.metadata.Severity == "high":
                 high_severity_pass += 1
-            if finding.check_metadata.Severity == "medium":
+            if finding.metadata.Severity == "medium":
                 medium_severity_pass += 1
-            if finding.check_metadata.Severity == "low":
+            if finding.metadata.Severity == "low":
                 low_severity_pass += 1
             total_pass += 1
             findings_count += 1
@@ -141,13 +142,13 @@ def extract_findings_statistics(findings: list) -> dict:
             muted_pass += 1
 
         if finding.status == "FAIL":
-            if finding.check_metadata.Severity == "critical":
+            if finding.metadata.Severity == "critical":
                 critical_severity_fail += 1
-            if finding.check_metadata.Severity == "high":
+            if finding.metadata.Severity == "high":
                 high_severity_fail += 1
-            if finding.check_metadata.Severity == "medium":
+            if finding.metadata.Severity == "medium":
                 medium_severity_fail += 1
-            if finding.check_metadata.Severity == "low":
+            if finding.metadata.Severity == "low":
                 low_severity_fail += 1
             total_fail += 1
             findings_count += 1
diff --git a/tests/lib/outputs/outputs_test.py b/tests/lib/outputs/outputs_test.py
index dbf7d5c8bd..9f34cc0721 100644
--- a/tests/lib/outputs/outputs_test.py
+++ b/tests/lib/outputs/outputs_test.py
@@ -246,13 +246,15 @@ def test_parse_json_tags(self):
         assert parse_json_tags([{}]) == {}
         assert parse_json_tags(None) == {}
 
+
+class TestExtractFindingStats:
     def test_extract_findings_statistics_different_resources(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "PASS"
-        finding_1.resource_id = "test_resource_1"
+        finding_1.resource_uid = "test_resource_1"
         finding_2 = mock.MagicMock()
         finding_2.status = "FAIL"
-        finding_2.resource_id = "test_resource_2"
+        finding_2.resource_uid = "test_resource_2"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -266,10 +268,10 @@ def test_extract_findings_statistics_different_resources(self):
     def test_extract_findings_statistics_same_resources(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "PASS"
-        finding_1.resource_id = "test_resource_1"
+        finding_1.resource_uid = "test_resource_1"
         finding_2 = mock.MagicMock()
         finding_2.status = "PASS"
-        finding_2.resource_id = "test_resource_1"
+        finding_2.resource_uid = "test_resource_1"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -283,10 +285,10 @@ def test_extract_findings_statistics_same_resources(self):
     def test_extract_findings_statistics_manual_resources(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "MANUAL"
-        finding_1.resource_id = "test_resource_1"
+        finding_1.resource_uid = "test_resource_1"
         finding_2 = mock.MagicMock()
         finding_2.status = "PASS"
-        finding_2.resource_id = "test_resource_1"
+        finding_2.resource_uid = "test_resource_1"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -320,15 +322,15 @@ def test_extract_findings_statistics_all_fail_are_muted(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "FAIL"
         finding_1.muted = True
-        finding_1.resource_id = "test_resource_1"
-        finding_1.check_metadata.Severity = "medium"
-        finding_1.check_metadata.CheckID = "glue_etl_jobs_amazon_s3_encryption_enabled"
+        finding_1.resource_uid = "test_resource_1"
+        finding_1.metadata.Severity = "medium"
+        finding_1.metadata.CheckID = "glue_etl_jobs_amazon_s3_encryption_enabled"
         finding_2 = mock.MagicMock()
         finding_2.status = "FAIL"
         finding_2.muted = True
-        finding_2.resource_id = "test_resource_2"
-        finding_2.check_metadata.Severity = "low"
-        finding_2.check_metadata.CheckID = "lightsail_static_ip_unused"
+        finding_2.resource_uid = "test_resource_2"
+        finding_2.metadata.Severity = "low"
+        finding_2.metadata.CheckID = "lightsail_static_ip_unused"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -352,17 +354,15 @@ def test_extract_findings_statistics_all_fail_are_not_muted(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "FAIL"
         finding_1.muted = True
-        finding_1.resource_id = "test_resource_1"
-        finding_1.check_metadata.Severity = "critical"
-        finding_1.check_metadata.CheckID = "rds_instance_certificate_expiration"
+        finding_1.resource_uid = "test_resource_1"
+        finding_1.metadata.Severity = "critical"
+        finding_1.metadata.CheckID = "rds_instance_certificate_expiration"
         finding_2 = mock.MagicMock()
         finding_2.status = "FAIL"
         finding_2.muted = False
-        finding_2.resource_id = "test_resource_1"
-        finding_2.check_metadata.Severity = "critical"
-        finding_2.check_metadata.CheckID = (
-            "autoscaling_find_secrets_ec2_launch_configuration"
-        )
+        finding_2.resource_uid = "test_resource_1"
+        finding_2.metadata.Severity = "critical"
+        finding_2.metadata.CheckID = "autoscaling_find_secrets_ec2_launch_configuration"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -386,18 +386,16 @@ def test_extract_findings_statistics_all_passes_are_not_muted(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "PASS"
         finding_1.muted = True
-        finding_1.resource_id = "test_resource_1"
-        finding_1.check_metadata.Severity = "critical"
-        finding_1.check_metadata.CheckID = (
-            "autoscaling_find_secrets_ec2_launch_configuration"
-        )
+        finding_1.resource_uid = "test_resource_1"
+        finding_1.metadata.Severity = "critical"
+        finding_1.metadata.CheckID = "autoscaling_find_secrets_ec2_launch_configuration"
 
         finding_2 = mock.MagicMock()
         finding_2.status = "PASS"
         finding_2.muted = False
-        finding_2.resource_id = "test_resource_1"
-        finding_2.check_metadata.Severity = "high"
-        finding_2.check_metadata.CheckID = "acm_certificates_expiration_check"
+        finding_2.resource_uid = "test_resource_1"
+        finding_2.metadata.Severity = "high"
+        finding_2.metadata.CheckID = "acm_certificates_expiration_check"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
@@ -420,9 +418,9 @@ def test_extract_findings_statistics_all_passes_are_muted(self):
         finding_1 = mock.MagicMock()
         finding_1.status = "PASS"
         finding_1.muted = True
-        finding_1.check_metadata.Severity = "critical"
-        finding_1.check_metadata.CheckID = "rds_instance_certificate_expiration"
-        finding_1.resource_id = "test_resource_1"
+        finding_1.metadata.Severity = "critical"
+        finding_1.metadata.CheckID = "rds_instance_certificate_expiration"
+        finding_1.resource_uid = "test_resource_1"
         findings = [finding_1]
 
         stats = extract_findings_statistics(findings)
@@ -441,6 +439,8 @@ def test_extract_findings_statistics_all_passes_are_muted(self):
         assert stats["total_low_severity_pass"] == 0
         assert stats["findings_count"] == 1
 
+
+class TestReport:
     def test_report_with_aws_provider_not_muted_pass(self):
         # Mocking check_findings and provider
         finding_1 = MagicMock()

From ef2df6e0a8e5ec7a0946cfdc82277c5adabb04cd Mon Sep 17 00:00:00 2001
From: pedrooot
Date: Wed, 26 Feb 2025 17:57:36 +0100
Subject: [PATCH 2/5] feat(findings): use finding output for stats

---
 prowler/__main__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/prowler/__main__.py b/prowler/__main__.py
index 1e535daea4..27314d7ad4 100644
--- a/prowler/__main__.py
+++ b/prowler/__main__.py
@@ -318,7 +318,7 @@ def prowler():
             continue
 
     # Extract findings stats
-    stats = extract_findings_statistics(findings)
+    stats = extract_findings_statistics(finding_outputs)
 
     if args.slack:
         # TODO: this should be also in a config file

From 5851f5e0e35ebc237858a3ba94c4a1a76400dc50 Mon Sep 17 00:00:00 2001
From: Pepe Fagoaga
Date: Thu, 27 Feb 2025 11:57:58 +0100
Subject: [PATCH 3/5] refactor(stats): use Finding attributes and fix tests

---
 prowler/lib/outputs/outputs.py    |  46 +++---
 tests/lib/outputs/outputs_test.py | 225 +++++++++++++++++++-----------
 2 files changed, 176 insertions(+), 95 deletions(-)

diff --git a/prowler/lib/outputs/outputs.py b/prowler/lib/outputs/outputs.py
index 49a1738ed6..dfd37429dd 100644
--- a/prowler/lib/outputs/outputs.py
+++ b/prowler/lib/outputs/outputs.py
@@ -1,7 +1,9 @@
 from colorama import Fore, Style
 
 from prowler.config.config import orange_color
+from prowler.lib.check.models import Severity
 from prowler.lib.logger import logger
+from prowler.lib.outputs.common import Status
 from prowler.lib.outputs.finding import Finding
 
 
 def stdout_report(finding, color, verbose, status, fix):
@@ -122,38 +124,46 @@ def extract_findings_statistics(findings: list[Finding]) -> dict:
     medium_severity_fail = 0
     low_severity_pass = 0
     low_severity_fail = 0
+    informational_severity_pass = 0
+    informational_severity_fail = 0
 
     for finding in findings:
-        # Save the resource_uid
         resources.add(finding.resource_uid)
 
-        if finding.status == "PASS":
-            if finding.metadata.Severity == "critical":
+        if finding.status == Status.PASS:
+            findings_count += 1
+            total_pass += 1
+            if finding.metadata.Severity == Severity.critical:
                 critical_severity_pass += 1
-            if finding.metadata.Severity == "high":
+            if finding.metadata.Severity == Severity.high:
                 high_severity_pass += 1
-            if finding.metadata.Severity == "medium":
+            if finding.metadata.Severity == Severity.medium:
                 medium_severity_pass += 1
-            if finding.metadata.Severity == "low":
+            if finding.metadata.Severity == Severity.low:
                 low_severity_pass += 1
-            total_pass += 1
-            findings_count += 1
+            if finding.metadata.Severity == Severity.informational:
+                informational_severity_pass += 1
+
             if finding.muted is True:
                 muted_pass += 1
 
-        if finding.status == "FAIL":
-            if finding.metadata.Severity == "critical":
+        if finding.status == Status.FAIL:
+            findings_count += 1
+            total_fail += 1
+            if finding.metadata.Severity == Severity.critical:
                 critical_severity_fail += 1
-            if finding.metadata.Severity == "high":
+            if finding.metadata.Severity == Severity.high:
                 high_severity_fail += 1
-            if finding.metadata.Severity == "medium":
+            if finding.metadata.Severity == Severity.medium:
                 medium_severity_fail += 1
-            if finding.metadata.Severity == "low":
+            if finding.metadata.Severity == Severity.low:
                 low_severity_fail += 1
-            total_fail += 1
-            findings_count += 1
+            if finding.metadata.Severity == Severity.informational:
+                informational_severity_fail += 1
+
             if finding.muted is True:
                 muted_fail += 1
+
         if not finding.muted and all_fails_are_muted:
             all_fails_are_muted = False
@@ -169,8 +179,10 @@ def extract_findings_statistics(findings: list[Finding]) -> dict:
     stats["total_high_severity_pass"] = high_severity_pass
     stats["total_medium_severity_fail"] = medium_severity_fail
     stats["total_medium_severity_pass"] = medium_severity_pass
-    stats["total_low_severity_fail"] = medium_severity_fail
-    stats["total_low_severity_pass"] = medium_severity_pass
+    stats["total_low_severity_fail"] = low_severity_fail
+    stats["total_low_severity_pass"] = low_severity_pass
+    stats["total_informational_severity_pass"] = informational_severity_pass
+    stats["total_informational_severity_fail"] = informational_severity_fail
     stats["all_fails_are_muted"] = all_fails_are_muted
 
     return stats
diff --git a/tests/lib/outputs/outputs_test.py b/tests/lib/outputs/outputs_test.py
index 9f34cc0721..b06be2eea5 100644
--- a/tests/lib/outputs/outputs_test.py
+++ b/tests/lib/outputs/outputs_test.py
@@ -18,6 +18,7 @@
     unroll_list,
     unroll_tags,
 )
+from tests.lib.outputs.fixtures.fixtures import generate_finding_output
 
 
 class TestOutputs:
@@ -249,64 +250,118 @@ def test_parse_json_tags(self):
 
 class TestExtractFindingStats:
     def test_extract_findings_statistics_different_resources(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "PASS"
-        finding_1.resource_uid = "test_resource_1"
-        finding_2 = mock.MagicMock()
-        finding_2.status = "FAIL"
-        finding_2.resource_uid = "test_resource_2"
+        finding_1 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+        finding_2 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_2",
+            severity="critical",
+            muted=False,
+        )
+
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 1
-        assert stats["total_fail"] == 1
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 1
         assert stats["total_muted_fail"] == 0
         assert stats["resources_count"] == 2
         assert stats["findings_count"] == 2
+        assert stats["total_critical_severity_fail"] == 1
+        assert stats["total_critical_severity_pass"] == 1
+        assert stats["total_high_severity_fail"] == 0
+        assert stats["total_high_severity_pass"] == 0
+        assert stats["total_medium_severity_fail"] == 0
+        assert stats["total_medium_severity_pass"] == 0
+        assert stats["total_low_severity_fail"] == 0
+        assert stats["total_low_severity_pass"] == 0
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is False
 
     def test_extract_findings_statistics_same_resources(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "PASS"
-        finding_1.resource_uid = "test_resource_1"
-        finding_2 = mock.MagicMock()
-        finding_2.status = "PASS"
-        finding_2.resource_uid = "test_resource_1"
+        finding_1 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+        finding_2 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 2
-        assert stats["total_fail"] == 0
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 0
         assert stats["total_muted_fail"] == 0
         assert stats["resources_count"] == 1
         assert stats["findings_count"] == 2
+        assert stats["total_critical_severity_fail"] == 0
+        assert stats["total_critical_severity_pass"] == 2
+        assert stats["total_high_severity_fail"] == 0
+        assert stats["total_high_severity_pass"] == 0
+        assert stats["total_medium_severity_fail"] == 0
+        assert stats["total_medium_severity_pass"] == 0
+        assert stats["total_low_severity_fail"] == 0
+        assert stats["total_low_severity_pass"] == 0
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is True
 
     def test_extract_findings_statistics_manual_resources(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "MANUAL"
-        finding_1.resource_uid = "test_resource_1"
-        finding_2 = mock.MagicMock()
-        finding_2.status = "PASS"
-        finding_2.resource_uid = "test_resource_1"
+        finding_1 = generate_finding_output(
+            status="MANUAL",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+        finding_2 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 1
-        assert stats["total_fail"] == 0
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 0
         assert stats["total_muted_fail"] == 0
         assert stats["resources_count"] == 1
         assert stats["findings_count"] == 1
+        assert stats["total_critical_severity_fail"] == 0
+        assert stats["total_critical_severity_pass"] == 1
+        assert stats["total_high_severity_fail"] == 0
+        assert stats["total_high_severity_pass"] == 0
+        assert stats["total_medium_severity_fail"] == 0
+        assert stats["total_medium_severity_pass"] == 0
+        assert stats["total_low_severity_fail"] == 0
+        assert stats["total_low_severity_pass"] == 0
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] == 0
 
     def test_extract_findings_statistics_no_findings(self):
-        findings = []
-
-        stats = extract_findings_statistics(findings)
+        stats = extract_findings_statistics([])
 
         assert stats["total_pass"] == 0
-        assert stats["total_fail"] == 0
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 0
         assert stats["total_muted_fail"] == 0
+        assert stats["resources_count"] == 0
+        assert stats["findings_count"] == 0
         assert stats["total_critical_severity_fail"] == 0
         assert stats["total_critical_severity_pass"] == 0
         assert stats["total_high_severity_fail"] == 0
@@ -315,28 +370,30 @@ def test_extract_findings_statistics_no_findings(self):
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 0
         assert stats["total_low_severity_pass"] == 0
-        assert stats["resources_count"] == 0
-        assert stats["findings_count"] == 0
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is True
 
     def test_extract_findings_statistics_all_fail_are_muted(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "FAIL"
-        finding_1.muted = True
-        finding_1.resource_uid = "test_resource_1"
-        finding_1.metadata.Severity = "medium"
-        finding_1.metadata.CheckID = "glue_etl_jobs_amazon_s3_encryption_enabled"
-        finding_2 = mock.MagicMock()
-        finding_2.status = "FAIL"
-        finding_2.muted = True
-        finding_2.resource_uid = "test_resource_2"
-        finding_2.metadata.Severity = "low"
-        finding_2.metadata.CheckID = "lightsail_static_ip_unused"
+        finding_1 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_1",
+            severity="medium",
+            muted=True,
+        )
+        finding_2 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_2",
+            severity="low",
+            muted=True,
+        )
+
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 0
-        assert stats["total_fail"] == 2
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 2
         assert stats["total_muted_fail"] == 2
         assert stats["resources_count"] == 2
         assert stats["findings_count"] == 2
@@ -348,29 +405,33 @@ def test_extract_findings_statistics_all_fail_are_muted(self):
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 1
         assert stats["total_low_severity_pass"] == 0
-        assert stats["all_fails_are_muted"]
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is True
 
     def test_extract_findings_statistics_all_fail_are_not_muted(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "FAIL"
-        finding_1.muted = True
-        finding_1.resource_uid = "test_resource_1"
-        finding_1.metadata.Severity = "critical"
-        finding_1.metadata.CheckID = "rds_instance_certificate_expiration"
-        finding_2 = mock.MagicMock()
-        finding_2.status = "FAIL"
-        finding_2.muted = False
-        finding_2.resource_uid = "test_resource_1"
-        finding_2.metadata.Severity = "critical"
-        finding_2.metadata.CheckID = "autoscaling_find_secrets_ec2_launch_configuration"
+        finding_1 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=True,
+        )
+        finding_2 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=False,
+        )
+
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 0
-        assert stats["total_fail"] == 2
         assert stats["total_muted_pass"] == 0
+        assert stats["total_fail"] == 2
         assert stats["total_muted_fail"] == 1
         assert stats["resources_count"] == 1
+        assert stats["findings_count"] == 2
         assert stats["total_critical_severity_fail"] == 2
         assert stats["total_critical_severity_pass"] == 0
         assert stats["total_high_severity_fail"] == 0
@@ -379,31 +440,33 @@ def test_extract_findings_statistics_all_fail_are_not_muted(self):
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 0
         assert stats["total_low_severity_pass"] == 0
-        assert stats["findings_count"] == 2
-        assert not stats["all_fails_are_muted"]
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is False
 
     def test_extract_findings_statistics_all_passes_are_not_muted(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "PASS"
-        finding_1.muted = True
-        finding_1.resource_uid = "test_resource_1"
-        finding_1.metadata.Severity = "critical"
-        finding_1.metadata.CheckID = "autoscaling_find_secrets_ec2_launch_configuration"
+        finding_1 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=True,
+        )
+        finding_2 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="high",
+            muted=False,
+        )
 
-        finding_2 = mock.MagicMock()
-        finding_2.status = "PASS"
-        finding_2.muted = False
-        finding_2.resource_uid = "test_resource_1"
-        finding_2.metadata.Severity = "high"
-        finding_2.metadata.CheckID = "acm_certificates_expiration_check"
         findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 2
-        assert stats["total_fail"] == 0
         assert stats["total_muted_pass"] == 1
+        assert stats["total_fail"] == 0
         assert stats["total_muted_fail"] == 0
         assert stats["resources_count"] == 1
+        assert stats["findings_count"] == 2
         assert stats["total_critical_severity_fail"] == 0
         assert stats["total_critical_severity_pass"] == 1
         assert stats["total_high_severity_fail"] == 0
@@ -412,23 +475,27 @@ def test_extract_findings_statistics_all_passes_are_not_muted(self):
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 0
         assert stats["total_low_severity_pass"] == 0
-        assert stats["findings_count"] == 2
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is True
 
     def test_extract_findings_statistics_all_passes_are_muted(self):
-        finding_1 = mock.MagicMock()
-        finding_1.status = "PASS"
-        finding_1.muted = True
-        finding_1.metadata.Severity = "critical"
-        finding_1.metadata.CheckID = "rds_instance_certificate_expiration"
-        finding_1.resource_uid = "test_resource_1"
+        finding_1 = generate_finding_output(
+            status="PASS",
+            resource_uid="test_resource_1",
+            severity="critical",
+            muted=True,
+        )
+
         findings = [finding_1]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 1
-        assert stats["total_fail"] == 0
         assert stats["total_muted_pass"] == 1
+        assert stats["total_fail"] == 0
         assert stats["total_muted_fail"] == 0
         assert stats["resources_count"] == 1
+        assert stats["findings_count"] == 1
         assert stats["total_critical_severity_fail"] == 0
         assert stats["total_critical_severity_pass"] == 1
         assert stats["total_high_severity_fail"] == 0
@@ -437,7 +504,9 @@ def test_extract_findings_statistics_all_passes_are_muted(self):
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 0
         assert stats["total_low_severity_pass"] == 0
-        assert stats["findings_count"] == 1
+        assert stats["total_informational_severity_fail"] == 0
+        assert stats["total_informational_severity_pass"] == 0
+        assert stats["all_fails_are_muted"] is True
 
 
 class TestReport:

From 5b0f1b40710875686ba7ee189869f15d351308d3 Mon Sep 17 00:00:00 2001
From: Pepe Fagoaga
Date: Thu, 27 Feb 2025 16:58:10 +0100
Subject: [PATCH 4/5] fix: test

---
 tests/lib/outputs/outputs_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/lib/outputs/outputs_test.py b/tests/lib/outputs/outputs_test.py
index b06be2eea5..9295502231 100644
--- a/tests/lib/outputs/outputs_test.py
+++ b/tests/lib/outputs/outputs_test.py
@@ -352,7 +352,7 @@ def test_extract_findings_statistics_manual_resources(self):
         assert stats["total_low_severity_pass"] == 0
         assert stats["total_informational_severity_fail"] == 0
         assert stats["total_informational_severity_pass"] == 0
-        assert stats["all_fails_are_muted"] == 0
+        assert stats["all_fails_are_muted"] is True
 
     def test_extract_findings_statistics_no_findings(self):
         stats = extract_findings_statistics([])

From 72e3ab9261a5b43b6b1264ecb1115f915b627cc6 Mon Sep 17 00:00:00 2001
From: Pepe Fagoaga
Date: Fri, 28 Feb 2025 09:55:57 +0100
Subject: [PATCH 5/5] test: improve

---
 tests/lib/outputs/outputs_test.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tests/lib/outputs/outputs_test.py b/tests/lib/outputs/outputs_test.py
index 9295502231..f61f28d1ca 100644
--- a/tests/lib/outputs/outputs_test.py
+++ b/tests/lib/outputs/outputs_test.py
@@ -483,29 +483,35 @@ def test_extract_findings_statistics_all_passes_are_muted(self):
         finding_1 = generate_finding_output(
             status="PASS",
             resource_uid="test_resource_1",
-            severity="critical",
+            severity="informational",
             muted=True,
         )
+        finding_2 = generate_finding_output(
+            status="FAIL",
+            resource_uid="test_resource_1",
+            severity="informational",
+            muted=True,
+        )
 
-        findings = [finding_1]
+        findings = [finding_1, finding_2]
 
         stats = extract_findings_statistics(findings)
 
         assert stats["total_pass"] == 1
         assert stats["total_muted_pass"] == 1
-        assert stats["total_fail"] == 0
-        assert stats["total_muted_fail"] == 0
+        assert stats["total_fail"] == 1
+        assert stats["total_muted_fail"] == 1
         assert stats["resources_count"] == 1
-        assert stats["findings_count"] == 1
+        assert stats["findings_count"] == 2
         assert stats["total_critical_severity_fail"] == 0
-        assert stats["total_critical_severity_pass"] == 1
+        assert stats["total_critical_severity_pass"] == 0
         assert stats["total_high_severity_fail"] == 0
         assert stats["total_high_severity_pass"] == 0
         assert stats["total_medium_severity_fail"] == 0
         assert stats["total_medium_severity_pass"] == 0
         assert stats["total_low_severity_fail"] == 0
         assert stats["total_low_severity_pass"] == 0
-        assert stats["total_informational_severity_fail"] == 0
-        assert stats["total_informational_severity_pass"] == 0
+        assert stats["total_informational_severity_fail"] == 1
+        assert stats["total_informational_severity_pass"] == 1
         assert stats["all_fails_are_muted"] is True
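Usage sketch (not part of the patch series above): after PATCH 2, prowler/__main__.py builds finding_outputs with Finding.generate_output and passes that list to extract_findings_statistics, which returns the flat dict of counters asserted in the tests. A minimal, hypothetical consumer of that dict is sketched below; the print_stats_summary helper is an illustrative assumption, not code from these patches, and it only reads keys the patches define.

# Illustrative sketch only -- a hypothetical consumer of the dict returned by
# extract_findings_statistics(finding_outputs); not part of the patches above.
from prowler.lib.outputs.outputs import extract_findings_statistics


def print_stats_summary(finding_outputs) -> None:
    """Print a one-line summary built from the aggregated statistics."""
    stats = extract_findings_statistics(finding_outputs)
    total = stats["findings_count"]
    # Guard against division by zero when the scan produced no PASS/FAIL findings.
    pass_rate = (stats["total_pass"] / total * 100) if total else 0.0
    print(
        f"{total} findings across {stats['resources_count']} resources: "
        f"{stats['total_pass']} PASS ({pass_rate:.1f}%), {stats['total_fail']} FAIL, "
        f"{stats['total_critical_severity_fail']} critical FAIL, "
        f"{stats['total_muted_fail']} muted FAIL"
    )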