2024-06-13 | MAIN --> PROD | DEV (6583b1c) --> STAGING #3982

Merged 2 commits on Jun 13, 2024
@@ -1,3 +1,6 @@
from django.conf import settings

from census_historical_migration.invalid_record import InvalidRecord
from .errors import (
err_findings_count_inconsistent,
)
@@ -19,10 +22,17 @@ def check_findings_count_consistency(sac_dict, *_args, **_kwargs):
findings_uniform_guidance = findings_uniform_guidance_section.get(
"findings_uniform_guidance_entries", []
)

data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
expected_award_refs_count = {}
found_award_refs_count = defaultdict(int)
errors = []
if (
data_source == settings.CENSUS_DATA_SOURCE
and "check_findings_count_consistency"
in InvalidRecord.fields["validations_to_skip"]
):
# Skip this validation if it is a historical audit report with incorrect findings count
return errors

for award in federal_awards:
award_reference = award.get("award_reference", None)
3 changes: 1 addition & 2 deletions backend/audit/cross_validation/check_ref_number_in_cap.py
@@ -35,10 +35,9 @@ def check_ref_number_in_cap(sac_dict, *_args, **_kwargs):
in_use_references = set()
errors = []

skip_validation_function = InvalidRecord.fields["validations_to_skip"]
if (
data_source == settings.CENSUS_DATA_SOURCE
and "check_ref_number_in_cap" in skip_validation_function
and "check_ref_number_in_cap" in InvalidRecord.fields["validations_to_skip"]
):
# Skip this validation if it is a historical audit report with non-matching reference numbers
return errors
@@ -29,10 +29,10 @@ def check_ref_number_in_findings_text(sac_dict, *_args, **_kwargs):
in_use_references = set()
errors = []

skip_validation_function = InvalidRecord.fields["validations_to_skip"]
if (
data_source == settings.CENSUS_DATA_SOURCE
and "check_ref_number_in_findings_text" in skip_validation_function
and "check_ref_number_in_findings_text"
in InvalidRecord.fields["validations_to_skip"]
):
# Skip this validation if it is a historical audit report with non-matching reference numbers
return errors
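The three cross-validation changes above share one pattern: when the submission is a Census-sourced historical record whose bad data was already flagged during migration, the check returns early with no errors. Below is a minimal sketch of that gate, reusing names that appear in the diff; the check name `check_example` and the `sac_dict` contents are illustrative only.

```python
# Sketch of the shared skip gate; not part of the diff above.
from django.conf import settings
from census_historical_migration.invalid_record import InvalidRecord


def check_example(sac_dict, *_args, **_kwargs):
    errors = []
    data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
    if (
        data_source == settings.CENSUS_DATA_SOURCE
        and "check_example" in InvalidRecord.fields["validations_to_skip"]
    ):
        # Historical report already flagged during migration; skip this check.
        return errors
    # ...normal validation logic would run here...
    return errors
```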
@@ -10,3 +10,4 @@ class INVALID_MIGRATION_TAGS:
EXTRA_FINDING_REFERENCE_NUMBERS_IN_FINDINGSTEXT = (
"extra_finding_reference_numbers_in_findingstext"
)
INCORRECT_FINDINGS_COUNT = "incorrect_findings_count"
@@ -1,6 +1,6 @@
from unittest.mock import patch
from django.conf import settings
from django.test import SimpleTestCase
from django.test import SimpleTestCase, TestCase

from .invalid_migration_tags import INVALID_MIGRATION_TAGS
from .invalid_record import InvalidRecord
@@ -11,6 +11,7 @@
from .workbooklib.federal_awards import (
is_valid_prefix,
track_invalid_federal_program_total,
track_invalid_number_of_audit_findings,
xform_match_number_passthrough_names_ids,
xform_missing_amount_expended,
xform_missing_program_total,
@@ -855,6 +856,91 @@ def test_no_invalid_federal_program_total(self):
self.assertNotIn


class TestTrackInvalidNumberOfAuditFindings(TestCase):

class MockAudit:
def __init__(self, elec_audits_id, findings_count):
self.ELECAUDITSID = elec_audits_id
self.FINDINGSCOUNT = findings_count

class MockFinding:
def __init__(self, elec_audits_id):
self.ELECAUDITSID = elec_audits_id

class MockAuditHeader:
def __init__(self, dbkey, audityear):
self.DBKEY = dbkey
self.AUDITYEAR = audityear

def setUp(self):

InvalidRecord.reset()

@patch("census_historical_migration.workbooklib.federal_awards.get_findings")
def test_track_invalid_number_of_audit_findings(self, mock_get_findings):
"""Test tracking for invalid number of audit findings"""
# Mock the findings and audits data
mock_findings = [
self.MockFinding("audit1"),
self.MockFinding("audit1"),
]

mock_audits = [
self.MockAudit("audit1", "2"),
self.MockAudit("audit2", "1"),
]

mock_audit_header = self.MockAuditHeader("some_dbkey", "2024")

mock_get_findings.return_value = mock_findings

track_invalid_number_of_audit_findings(mock_audits, mock_audit_header)
mock_get_findings.assert_called_once_with("some_dbkey", "2024")
# Check if the invalid records were appended correctly
expected_invalid_records = [
[
{
"census_data": [
{"column": "ELECAUDITSID", "value": "audit1"},
{"column": "FINDINGSCOUNT", "value": "2"},
],
"gsa_fac_data": {"field": "findings_count", "value": "2"},
},
{
"census_data": [
{"column": "ELECAUDITSID", "value": "audit2"},
{"column": "FINDINGSCOUNT", "value": "1"},
],
"gsa_fac_data": {"field": "findings_count", "value": "0"},
},
]
]

for expected_record in expected_invalid_records:
self.assertIn(expected_record, InvalidRecord.fields["federal_award"])

@patch("census_historical_migration.workbooklib.federal_awards.get_findings")
def test_no_tracking_with_valid_number_of_audit_findings(self, mock_get_findings):
"""Test no tracking for valid number of audit findings"""
# Mock the findings and audits data
mock_findings = [
self.MockFinding("audit1"),
self.MockFinding("audit1"),
]

mock_audits = [
self.MockAudit("audit1", "2"),
]

mock_audit_header = self.MockAuditHeader("some_dbkey", "2024")
mock_get_findings.return_value = mock_findings

track_invalid_number_of_audit_findings(mock_audits, mock_audit_header)

mock_get_findings.assert_called_once_with("some_dbkey", "2024")
self.assertEqual(len(InvalidRecord.fields["federal_award"]), 0)


class TestXformMissingPrefix(SimpleTestCase):
class MockAudit:

@@ -94,8 +94,8 @@ def track_invalid_records_with_more_captexts_less_findings(findings, captexts):
captext_refnums = get_reference_numbers_from_text_records(captexts)
invalid_records = []
extra_captexts = captext_refnums.difference(finding_refnums)
if len(extra_captexts) > 0:
invalid_records = []
missing_captexts = finding_refnums.difference(captext_refnums)
if len(extra_captexts):
for captext_refnum in captext_refnums:
census_data_tuples = [
("FINDINGREFNUMS", captext_refnum),
@@ -106,9 +106,21 @@ def track_invalid_records_with_more_captexts_less_findings(findings, captexts):
captext_refnum,
invalid_records,
)
InvalidRecord.append_invalid_cap_text_records(invalid_records)
elif len(missing_captexts):
for finding_refnum in missing_captexts:
census_data_tuples = [
("FINDINGREFNUMS", finding_refnum),
]
track_invalid_records(
census_data_tuples,
"reference_number",
finding_refnum,
invalid_records,
)
InvalidRecord.append_invalid_finding_records(invalid_records)

if invalid_records:
InvalidRecord.append_invalid_cap_text_records(invalid_records)
InvalidRecord.append_validations_to_skip("check_ref_number_in_cap")
InvalidRecord.append_invalid_migration_tag(
INVALID_MIGRATION_TAGS.EXTRA_FINDING_REFERENCE_NUMBERS_IN_CAPTEXT
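The corrective action plan (CAP) text change above makes the tracking symmetric: reference numbers that appear in CAP texts but not in findings (`extra_captexts`) are handled first, and otherwise findings with no matching CAP text (`missing_captexts`) are tracked. A short illustration of the two set differences, using made-up reference numbers:

```python
# Illustrative values only; the reference numbers are made up.
finding_refnums = {"2024-001", "2024-002"}
captext_refnums = {"2024-001", "2024-003"}

extra_captexts = captext_refnums.difference(finding_refnums)    # {"2024-003"}
missing_captexts = finding_refnums.difference(captext_refnums)  # {"2024-002"}
```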
51 changes: 51 additions & 0 deletions backend/census_historical_migration/workbooklib/federal_awards.py
@@ -1,4 +1,6 @@
from collections import defaultdict
from audit.intakelib.checks.check_cluster_total import expected_cluster_total
from .findings import get_findings
from ..invalid_migration_tags import INVALID_MIGRATION_TAGS
from ..transforms.xform_retrieve_uei import xform_retrieve_uei
from ..transforms.xform_string_to_int import string_to_int
@@ -126,6 +128,53 @@ def xform_missing_major_program(audits):
InspectionRecord.append_federal_awards_changes(change_records)


def track_invalid_number_of_audit_findings(audits, audit_header):
"""Track invalid number of audit findings."""
findings = get_findings(audit_header.DBKEY, audit_header.AUDITYEAR)

invalid_audit_records = []
is_incorrect_findings_count_found = False
expected_finding_count = defaultdict(int)
declared_finding_count = defaultdict(int)

# Count the expected findings
for finding in findings:
expected_finding_count[finding.ELECAUDITSID] += 1

# Count the declared findings
for audit in audits:
declared_finding_count[audit.ELECAUDITSID] = string_to_int(audit.FINDINGSCOUNT)

# Check for discrepancies in findings count
if (
len(expected_finding_count) != len(declared_finding_count)
or sum(expected_finding_count.values()) != sum(declared_finding_count.values())
or declared_finding_count != expected_finding_count
):
is_incorrect_findings_count_found = True

# Track invalid audit records if discrepancies are found
if is_incorrect_findings_count_found:
for audit in audits:
elec_audits_id = audit.ELECAUDITSID
expected_count = expected_finding_count[elec_audits_id]
track_invalid_records(
[
("ELECAUDITSID", elec_audits_id),
("FINDINGSCOUNT", audit.FINDINGSCOUNT),
],
"findings_count",
str(expected_count),
invalid_audit_records,
)

InvalidRecord.append_validations_to_skip("check_findings_count_consistency")
InvalidRecord.append_invalid_migration_tag(
INVALID_MIGRATION_TAGS.INCORRECT_FINDINGS_COUNT,
)
InvalidRecord.append_invalid_federal_awards_records(invalid_audit_records)


def xform_missing_findings_count(audits):
"""Default missing findings count to zero."""
# Transformation to be documented.
@@ -888,11 +937,13 @@ def generate_federal_awards(audit_header, outfile):
audits, cluster_names, other_cluster_names, state_cluster_names
)
xform_missing_program_total(audits)

xform_missing_findings_count(audits)
xform_missing_amount_expended(audits)
xform_program_name(audits)
xform_is_passthrough_award(audits)
xform_missing_major_program(audits)
track_invalid_number_of_audit_findings(audits, audit_header)
xform_replace_required_values_with_gsa_migration_when_empty(audits)
xform_replace_missing_prefix(audits)
map_simple_columns(wb, mappings, audits)
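The new `track_invalid_number_of_audit_findings` above compares, per `ELECAUDITSID`, the number of findings actually returned by `get_findings` against the `FINDINGSCOUNT` declared on each award, and flags the whole set when the keys or totals disagree. A condensed sketch of that comparison, with made-up IDs and counts:

```python
# Condensed sketch of the discrepancy check; IDs and counts are made up.
from collections import defaultdict

finding_ids = ["audit1", "audit1"]     # ELECAUDITSIDs of findings returned by get_findings
declared = {"audit1": 2, "audit2": 1}  # FINDINGSCOUNT declared per award

expected = defaultdict(int)
for elec_audits_id in finding_ids:
    expected[elec_audits_id] += 1      # -> {"audit1": 2}

has_discrepancy = (
    len(expected) != len(declared)
    or sum(expected.values()) != sum(declared.values())
    or declared != expected
)
print(has_discrepancy)  # True: audit2 declares one finding, but none were found
```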
20 changes: 16 additions & 4 deletions backend/census_historical_migration/workbooklib/findings_text.py
@@ -98,8 +98,9 @@ def track_invalid_records_with_more_findings_texts_than_findings(
findings_text_refnums = get_reference_numbers_from_text_records(findings_texts)
invalid_records = []
extra_findings_texts = findings_text_refnums.difference(finding_refnums)
if len(extra_findings_texts) > 0:
invalid_records = []
missing_findings_texts = finding_refnums.difference(findings_text_refnums)

if len(extra_findings_texts):
for findings_text_refnum in findings_text_refnums:
census_data_tuples = [
("FINDINGREFNUMS", findings_text_refnum),
@@ -110,9 +111,20 @@
findings_text_refnum,
findings_text_refnum,
invalid_records,
)

if invalid_records:
InvalidRecord.append_invalid_finding_text_records(invalid_records)
elif len(missing_findings_texts):
for finding_refnum in missing_findings_texts:
census_data_tuples = [
("FINDINGREFNUMS", finding_refnum),
]
track_invalid_records(
census_data_tuples,
"reference_number",
finding_refnum,
invalid_records,
)
InvalidRecord.append_invalid_finding_records(invalid_records)
if invalid_records:
InvalidRecord.append_validations_to_skip("check_ref_number_in_findings_text")
InvalidRecord.append_invalid_migration_tag(
INVALID_MIGRATION_TAGS.EXTRA_FINDING_REFERENCE_NUMBERS_IN_FINDINGSTEXT
@@ -1,4 +1,4 @@
# 35. Access deletion: allow deletion of Audit Editor role
# 36. Access deletion: allow deletion of Audit Editor role

Date: 2024-05-28
