From 2a16a0d902223c3164e65b4720e8b4fdb454e37c Mon Sep 17 00:00:00 2001
From: Bobby Novak <176936850+rnovak338@users.noreply.github.com>
Date: Tue, 27 Aug 2024 16:46:12 -0400
Subject: [PATCH] Convert CRLF to LF (#4221)

Used a VSCode extension command, `Change All End Of Line Sequence`, to
convert each of the following files to LF.
---
 .../check_award_ref_declaration.py            |   80 +-
 .../check_award_ref_existence.py              |   74 +-
 .../check_award_reference_uniqueness.py       |   68 +-
 .../check_biennial_low_risk.py                |   44 +-
 .../check_certifying_contacts.py              |   40 +-
 .../check_finding_reference_uniqueness.py     |  118 +-
 .../check_findings_count_consistency.py       |  124 +-
 .../check_ref_number_in_cap.py                |  142 +-
 .../check_ref_number_in_findings_text.py      |  132 +-
 .../test_check_award_ref_declaration.py       |  168 +-
 .../test_check_award_ref_existence.py         |  120 +-
 .../test_check_award_reference_uniqueness.py  |   96 +-
 .../test_check_certifying_contacts.py         |   86 +-
 ...test_check_finding_reference_uniqueness.py |  270 +-
 .../test_check_findings_count_consistency.py  |  206 +-
 .../test_check_ref_number_in_cap.py           |  280 +-
 .../test_check_ref_number_in_findings_text.py |  280 +-
 backend/audit/cross_validation/utils.py       |   56 +-
 .../xform_add_transform_for_cfda_key.py       |   42 +-
 backend/audit/test_admin.py                   |  340 +--
 backend/audit/test_utils.py                   |   76 +-
 backend/census_historical_migration/admin.py  |   58 +-
 backend/census_historical_migration/apps.py   |   12 +-
 .../base_field_maps.py                        |   60 +-
 .../exception_utils.py                        |   38 +-
 .../invalid_migration_tags.py                 |   30 +-
 .../commands/reprocess_failed_migration.py    |   94 +-
 .../sac_general_lib/audit_information.py      |  788 +++---
 .../sac_general_lib/auditee_certification.py  |  112 +-
 .../sac_general_lib/auditor_certification.py  |  108 +-
 .../sac_general_lib/cognizant_oversight.py    |   30 +-
 .../sac_general_lib/general_information.py    | 1180 ++++----
 .../sac_general_lib/report_id_generator.py    |   32 +-
 .../sac_general_lib/sac_creator.py            |  202 +-
 .../sac_general_lib/utils.py                  |  206 +-
 .../test_additional_eins_xforms.py            |   42 +-
 .../test_audit_information_xforms.py          |  564 ++--
 .../test_corrective_action_plan_xforms.py     |  192 +-
 .../test_excel_creation.py                    |  468 ++--
 .../test_federal_awards_xforms.py             | 2000 ++++++-------
 .../test_findings_xforms.py                   |  712 ++---
 .../test_general_information_xforms.py        | 1210 ++++----
 .../test_notes_to_sefa_xforms.py              | 1036 +++----
 .../test_sac_creation.py                      |  234 +-
 .../test_secondary_auditors_xforms.py         |  288 +-
 .../test_shared_xforms.py                     |  530 ++--
 .../reprocess_migration_cli_commands.py       |   72 +-
 .../start_process_cli_commands.py             |  108 +-
 .../stop_process_cli_commands.py              |  182 +-
 .../throwaway_scripts/util.py                 |  100 +-
 .../xform_remove_hyphen_and_pad_zip.py        |   78 +-
 .../transforms/xform_retrieve_uei.py          |   24 +-
 .../transforms/xform_string_to_bool.py        |   50 +-
 .../transforms/xform_string_to_int.py         |   40 +-
 .../transforms/xform_string_to_string.py      |   32 +-
 .../transforms/xform_uppercase_y_or_n.py      |   50 +-
 backend/census_historical_migration/views.py  |    6 +-
 .../workbooklib/workbook_section_handlers.py  |   68 +-
 backend/config/db_url.py                      |   34 +-
 backend/manifests/manifest-preview.yml        |   60 +-
 backend/manifests/vars/vars-staging.yml       |   16 +-
 .../report_submission/context_processors.py   |   10 +-
 backend/requirements.txt                      | 2484 ++++++++---------
 .../admin_api_v1_1_0/create_access_tables.sql |   86 +-
 ...g-invalid-audit-report-during-migration.md |  184 +-
 .../Application/fac_system_context_unified.md |  178 +-
 docs/backups_and_restores.md                  |  178 +-
 67 files changed, 8554 insertions(+), 8554 deletions(-)
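The same conversion can be scripted for anyone without that VSCode command.
Below is a minimal stdlib sketch; it is illustrative only and was not the
tool used for this patch, and the directory and glob it walks are assumptions
for the example:

    # Illustrative sketch only: rewrite CRLF line endings as LF in place.
    from pathlib import Path

    def convert_crlf_to_lf(path: Path) -> bool:
        """Rewrite `path` with LF endings; return True if the file changed."""
        data = path.read_bytes()
        converted = data.replace(b"\r\n", b"\n")
        if converted == data:
            return False
        path.write_bytes(converted)
        return True

    if __name__ == "__main__":
        # Assumed scope: every Python file under backend/.
        for p in Path("backend").rglob("*.py"):
            if convert_crlf_to_lf(p):
                print(f"converted {p}")

A repository-level alternative is to declare `* text=auto eol=lf` in
`.gitattributes` and run `git add --renormalize .`, which should keep CRLF
endings from reappearing in future checkouts.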
diff --git a/backend/audit/cross_validation/check_award_ref_declaration.py b/backend/audit/cross_validation/check_award_ref_declaration.py
index 3ac0b9d1a4..d04eebba88 100644
--- a/backend/audit/cross_validation/check_award_ref_declaration.py
+++ b/backend/audit/cross_validation/check_award_ref_declaration.py
@@ -1,40 +1,40 @@
from .errors import (
    err_award_ref_not_declared,
)


def check_award_ref_declaration(sac_dict, *_args, **_kwargs):
    """
    Check that the award references reported in the Federal Awards Audit Findings workbook
    are present within the Federal Awards workbook.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    federal_awards_section = all_sections.get("federal_awards") or {}
    federal_awards = federal_awards_section.get("federal_awards", [])
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )

    declared_award_refs = set()
    reported_award_refs = set()
    errors = []

    for award in federal_awards:
        award_ref = award.get("award_reference")
        if award_ref:
            declared_award_refs.add(award_ref)

    for finding in findings_uniform_guidance:
        award_ref = finding["program"]["award_reference"]
        if award_ref:
            reported_award_refs.add(award_ref)

    difference = reported_award_refs.difference(declared_award_refs)
    if difference:
        errors.append({"error": err_award_ref_not_declared(list(difference))})

    return errors
diff --git a/backend/audit/cross_validation/check_award_ref_existence.py b/backend/audit/cross_validation/check_award_ref_existence.py
index f12586bcd7..4c920b1e26 100644
--- a/backend/audit/cross_validation/check_award_ref_existence.py
+++ b/backend/audit/cross_validation/check_award_ref_existence.py
@@ -1,37 +1,37 @@
from audit.fixtures.excel import (
    FEDERAL_AWARDS_TEMPLATE_DEFINITION,
)
from .errors import (
    err_missing_award_reference,
)
from django.conf import settings
import json

TEMPLATE_DEFINITION_PATH = (
    settings.XLSX_TEMPLATE_JSON_DIR / FEDERAL_AWARDS_TEMPLATE_DEFINITION
)
FEDERAL_AWARDS_TEMPLATE = json.loads(
    TEMPLATE_DEFINITION_PATH.read_text(encoding="utf-8")
)


def check_award_ref_existence(sac_dict, *_args, **_kwargs):
    """
    Check that all awards have a reference code.
    """
    first_row = FEDERAL_AWARDS_TEMPLATE["title_row"]
    all_sections = sac_dict.get("sf_sac_sections", {})
    federal_awards_section = all_sections.get("federal_awards") or {}
    federal_awards = federal_awards_section.get("federal_awards", [])
    missing_refs = []
    errors = []
    for index, award in enumerate(federal_awards):
        if (
            "award_reference" not in award
        ):  # When award_reference exists the schema ensures it is not empty and has the correct shape
            missing_refs.append(first_row + index + 1)

    for row_num in missing_refs:
        errors.append({"error": err_missing_award_reference(row_num)})

    return errors
diff --git a/backend/audit/cross_validation/check_award_reference_uniqueness.py b/backend/audit/cross_validation/check_award_reference_uniqueness.py
index cc0c4843d9..a1e57ff0db 100644
--- a/backend/audit/cross_validation/check_award_reference_uniqueness.py
+++ b/backend/audit/cross_validation/check_award_reference_uniqueness.py
@@ -1,34 +1,34 @@
from .errors import (
    err_duplicate_award_reference,
)
from collections import Counter


def check_award_reference_uniqueness(sac_dict, *_args, **_kwargs):
    """
    Check that all award references in the Federal Award workbook are distinct.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    federal_awards_section = all_sections.get("federal_awards") or {}
    federal_awards = federal_awards_section.get("federal_awards", [])
    award_refs = []
    errors = []

    for award in federal_awards:
        award_reference = award.get("award_reference")

        if award_reference:
            award_refs.append(award_reference)

    duplicated_award_refs = _find_duplicates(award_refs)

    for dup_award_ref in duplicated_award_refs:
        errors.append({"error": err_duplicate_award_reference(dup_award_ref)})

    return errors


def _find_duplicates(lst):
    count = Counter(lst)
    return [item for item, count in count.items() if count > 1]
diff --git a/backend/audit/cross_validation/check_biennial_low_risk.py b/backend/audit/cross_validation/check_biennial_low_risk.py
index 71089ca2ed..202bc9fc9a 100644
--- a/backend/audit/cross_validation/check_biennial_low_risk.py
+++ b/backend/audit/cross_validation/check_biennial_low_risk.py
@@ -1,22 +1,22 @@
from .errors import err_biennial_low_risk


def check_biennial_low_risk(sac_dict, *_args, **_kwargs):
    """
    Check that both biennial and low risk flags aren't both set.
    """
    all_sections = sac_dict["sf_sac_sections"]

    general_information = all_sections.get("general_information", {})
    audit_information = all_sections.get("audit_information", {})

    if not (general_information and audit_information):
        return []

    audit_period_covered = general_information.get("audit_period_covered")
    is_low_risk_auditee = audit_information.get("is_low_risk_auditee")

    if audit_period_covered == "biennial" and is_low_risk_auditee:
        return [{"error": err_biennial_low_risk()}]
    else:
        return []
diff --git a/backend/audit/cross_validation/check_certifying_contacts.py b/backend/audit/cross_validation/check_certifying_contacts.py
index 370eaa66f4..39d98c8304 100644
--- a/backend/audit/cross_validation/check_certifying_contacts.py
+++ b/backend/audit/cross_validation/check_certifying_contacts.py
@@ -1,20 +1,20 @@
from .errors import (
    err_certifying_contacts_should_not_match,
)


def check_certifying_contacts(sac_dict, *_args, **_kwargs):
    """
    Check that the certifying auditor and auditee do not have the same email address.
    """

    sf_sac_meta = sac_dict.get("sf_sac_meta", {})
    certifying_auditee_contact = sf_sac_meta.get("certifying_auditee_contact")
    certifying_auditor_contact = sf_sac_meta.get("certifying_auditor_contact")
    errors = []

    if certifying_auditee_contact and certifying_auditor_contact:
        if certifying_auditee_contact == certifying_auditor_contact:
            errors.append({"error": err_certifying_contacts_should_not_match()})

    return errors
diff --git a/backend/audit/cross_validation/check_finding_reference_uniqueness.py b/backend/audit/cross_validation/check_finding_reference_uniqueness.py
index c7e7b94c97..4ab7985e50 100644
--- a/backend/audit/cross_validation/check_finding_reference_uniqueness.py
+++ b/backend/audit/cross_validation/check_finding_reference_uniqueness.py
@@ -1,59 +1,59 @@
from django.conf import settings
from census_historical_migration.invalid_record import InvalidRecord
from .errors import (
    err_duplicate_finding_reference,
)
from collections import defaultdict


def check_finding_reference_uniqueness(sac_dict, *_args, **_kwargs):
    """
    Check the uniqueness of REFERENCE numbers for each AWARD in findings.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
    waiver_types = sac_dict.get("waiver_types", [])
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )

    ref_numbers = defaultdict(set)
    duplicate_ref_number = defaultdict(set)
    errors = []

    if (
        data_source == settings.CENSUS_DATA_SOURCE
        and "check_finding_reference_uniqueness"
        in InvalidRecord.fields["validations_to_skip"]
    ):
        # Skip this validation if it is an historical audit report with duplicate reference numbers
        return errors

    from audit.models import SacValidationWaiver

    if SacValidationWaiver.TYPES.FINDING_REFERENCE_NUMBER in waiver_types:
        return errors

    for finding in findings_uniform_guidance:
        award_ref = finding["program"]["award_reference"]
        ref_number = finding["findings"]["reference_number"]
        if ref_number in ref_numbers[award_ref]:
            duplicate_ref_number[award_ref].add(ref_number)
        ref_numbers[award_ref].add(ref_number)

    for award_ref, ref_nums in duplicate_ref_number.items():
        for ref_num in ref_nums:
            errors.append(
                {
                    "error": err_duplicate_finding_reference(
                        award_ref,
                        ref_num,
                    )
                }
            )

    return errors
diff --git a/backend/audit/cross_validation/check_findings_count_consistency.py b/backend/audit/cross_validation/check_findings_count_consistency.py
index f0fc0cd101..08d1fcfefd 100644
--- a/backend/audit/cross_validation/check_findings_count_consistency.py
+++ b/backend/audit/cross_validation/check_findings_count_consistency.py
@@ -1,62 +1,62 @@
from django.conf import settings

from census_historical_migration.invalid_record import InvalidRecord
from .errors import (
    err_findings_count_inconsistent,
)
from collections import defaultdict


def check_findings_count_consistency(sac_dict, *_args, **_kwargs):
    """
    Checks that the number of findings mentioned in Federal Awards matches
    the number of findings referenced in Federal Awards Audit Findings.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    federal_awards_section = all_sections.get("federal_awards") or {}
    federal_awards = federal_awards_section.get("federal_awards", [])
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )
    data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
    expected_award_refs_count = {}
    found_award_refs_count = defaultdict(int)
    errors = []
    if (
        data_source == settings.CENSUS_DATA_SOURCE
        and "check_findings_count_consistency"
        in InvalidRecord.fields["validations_to_skip"]
    ):
        # Skip this validation if it is an historical audit report with incorrect findings count
        return errors

    for award in federal_awards:
        award_reference = award.get("award_reference", None)
        if award_reference:
            expected_award_refs_count[award_reference] = award["program"][
                "number_of_audit_findings"
            ]

    for finding in findings_uniform_guidance:
        award_ref = finding["program"]["award_reference"]
        if award_ref in expected_award_refs_count:
            found_award_refs_count[award_ref] += 1

    for award_ref, expected in expected_award_refs_count.items():
        counted = found_award_refs_count[award_ref]
        if counted != expected:
            errors.append(
                {
                    "error": err_findings_count_inconsistent(
                        expected,
                        counted,
                        award_ref,
                    )
                }
            )

    return errors
diff --git a/backend/audit/cross_validation/check_ref_number_in_cap.py b/backend/audit/cross_validation/check_ref_number_in_cap.py
index 8de9c40283..22ca989e46 100644
--- a/backend/audit/cross_validation/check_ref_number_in_cap.py
+++ b/backend/audit/cross_validation/check_ref_number_in_cap.py
@@ -1,71 +1,71 @@
from django.conf import settings
from census_historical_migration.invalid_record import InvalidRecord
from .errors import (
    err_missing_or_extra_references,
)
from audit.fixtures.excel import (
    SECTION_NAMES,
)

import logging

logger = logging.getLogger(__name__)


def check_ref_number_in_cap(sac_dict, *_args, **_kwargs):
    """
    Check that all reference numbers in Federal Award Audit Findings workbook
    appear at least once in the Corrective Action Plan workbook.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )
    corrective_action_plan_section = all_sections.get("corrective_action_plan") or {}
    corrective_action_plan = corrective_action_plan_section.get(
        "corrective_action_plan_entries", []
    )

    declared_references = set()
    in_use_references = set()
    errors = []

    if (
        data_source == settings.CENSUS_DATA_SOURCE
        and "check_ref_number_in_cap" in InvalidRecord.fields["validations_to_skip"]
    ):
        # Skip this validation if it is a historical audit report with non-matching reference numbers
        return errors

    for finding in findings_uniform_guidance:
        ref_number = finding["findings"]["reference_number"]
        if ref_number:
            declared_references.add(ref_number)

    for cap in corrective_action_plan:
        ref_number = cap["reference_number"]
        if ref_number:
            in_use_references.add(ref_number)

    # Items in declared_references but not in in_use_references
    declared_not_in_use = declared_references.difference(in_use_references)

    # Items in in_use_references but not in declared_references
    in_use_not_declared = in_use_references.difference(declared_references)

    if declared_not_in_use or in_use_not_declared:
        errors.append(
            {
                "error": err_missing_or_extra_references(
                    declared_not_in_use,
                    in_use_not_declared,
                    SECTION_NAMES.CORRECTIVE_ACTION_PLAN,
                )
            }
        )
    return errors
diff --git a/backend/audit/cross_validation/check_ref_number_in_findings_text.py b/backend/audit/cross_validation/check_ref_number_in_findings_text.py
index ea84204a82..eef402e991 100644
--- a/backend/audit/cross_validation/check_ref_number_in_findings_text.py
+++ b/backend/audit/cross_validation/check_ref_number_in_findings_text.py
@@ -1,66 +1,66 @@
from django.conf import settings
from census_historical_migration.invalid_record import InvalidRecord
from .errors import (
    err_missing_or_extra_references,
)
from audit.fixtures.excel import (
    SECTION_NAMES,
)


def check_ref_number_in_findings_text(sac_dict, *_args, **_kwargs):
    """
    Check that all reference numbers in Federal Award Audit Findings workbook
    appear at least once in the Audit Findings Text workbook.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    data_source = sac_dict.get("sf_sac_meta", {}).get("data_source", "")
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )
    findings_text_section = all_sections.get("findings_text") or {}
    findings_text = findings_text_section.get("findings_text_entries", [])

    declared_references = set()
    in_use_references = set()
    errors = []

    if (
        data_source == settings.CENSUS_DATA_SOURCE
        and "check_ref_number_in_findings_text"
        in InvalidRecord.fields["validations_to_skip"]
    ):
        # Skip this validation if it is a historical audit report with non-matching reference numbers
        return errors

    for finding in findings_uniform_guidance:
        ref_number = finding["findings"]["reference_number"]
        if ref_number:
            declared_references.add(ref_number)

    for finding in findings_text:
        ref_number = finding["reference_number"]
        if ref_number:
            in_use_references.add(ref_number)

    # Items in declared_references but not in in_use_references
    declared_not_in_use = declared_references.difference(in_use_references)

    # Items in in_use_references but not in declared_references
    in_use_not_declared = in_use_references.difference(declared_references)

    if declared_not_in_use or in_use_not_declared:
        errors.append(
            {
                "error": err_missing_or_extra_references(
                    declared_not_in_use,
                    in_use_not_declared,
                    SECTION_NAMES.AUDIT_FINDINGS_TEXT,
                )
            }
        )
    return errors
diff --git a/backend/audit/cross_validation/test_check_award_ref_declaration.py b/backend/audit/cross_validation/test_check_award_ref_declaration.py
index 338f84eca8..e64a768681 100644
--- a/backend/audit/cross_validation/test_check_award_ref_declaration.py
+++ b/backend/audit/cross_validation/test_check_award_ref_declaration.py
@@ -1,84 +1,84 @@
from unittest import TestCase
from audit.models import SingleAuditChecklist
from .sac_validation_shape import sac_validation_shape
from .utils import generate_random_integer
from .check_award_ref_declaration import check_award_ref_declaration
from .errors import err_award_ref_not_declared
from model_bakery import baker


class CheckAwardRefDeclarationTests(TestCase):
    def setUp(self):
        """Set up the common variables for the test cases."""
        self.AWARD_MIN = 1000
        self.AWARD_MAX = 1500
        self.AUDITEE_UEI = "AAA123456BBB"
        self.award1 = {
            "award_reference": f"AWARD-{generate_random_integer(self.AWARD_MIN,self.AWARD_MAX)}"
        }
        self.award2 = {
            "award_reference": f"AWARD-{generate_random_integer(self.AWARD_MIN *2,self.AWARD_MAX *2)}"
        }
        self.award3 = {
            "award_reference": f"AWARD-{generate_random_integer(self.AWARD_MIN *3,self.AWARD_MAX *3)}"
        }

    def _make_federal_awards(self, award_refs) -> dict:
        return {
            "FederalAwards": {
                "auditee_uei": self.AUDITEE_UEI,
                "federal_awards": award_refs,
            }
        }

    def _make_findings_uniform_guidance(self, award_refs) -> dict:
        entries = []
        for ref in award_refs:
            entries.append({"program": ref})

        findings = (
            {
                "auditee_uei": self.AUDITEE_UEI,
                "findings_uniform_guidance_entries": entries,
            }
            if len(entries) > 0
            else {"auditee_uei": self.AUDITEE_UEI}
        )

        return {"FindingsUniformGuidance": findings}

    def _make_sac(self, award_refs, findings_award_refs) -> SingleAuditChecklist:
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(award_refs)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            findings_award_refs
        )
        return sac

    def test_no_errors_for_matching_award_references(self):
        """When the set of award references in both the Federal Awards workbook and the Federal
        Awards Audit Findings workbook match, no errors should be raised."""
        award_refs = [self.award1, self.award2]
        sac = self._make_sac(award_refs, award_refs)
        errors = check_award_ref_declaration(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_no_errors_for_subset_award_references_in_findings(self):
        """When the set of award references in the Federal Awards Audit Findings workbook is
        a subset of the set of award references in the Federal Awards workbook, no errors should be raised.
        """
        sac = self._make_sac(
            [self.award1, self.award2, self.award3], [self.award1, self.award3]
        )
        errors = check_award_ref_declaration(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_errors_for_findings_with_undeclared_award_refs(self):
        """When the set of award references in the Federal Awards Audit Findings workbook is not
        a subset of the set of award references in the Federal Awards workbook, errors should be raised.
        """
        sac = self._make_sac([self.award1, self.award3], [self.award1, self.award2])
        errors = check_award_ref_declaration(sac_validation_shape(sac))
        self.assertEqual(len(errors), 1)
        expected_error = err_award_ref_not_declared([self.award2["award_reference"]])
        self.assertIn({"error": expected_error}, errors)
diff --git a/backend/audit/cross_validation/test_check_award_ref_existence.py b/backend/audit/cross_validation/test_check_award_ref_existence.py
index 284dff8139..76e2719288 100644
--- a/backend/audit/cross_validation/test_check_award_ref_existence.py
+++ b/backend/audit/cross_validation/test_check_award_ref_existence.py
@@ -1,60 +1,60 @@
from django.test import TestCase
from audit.models import SingleAuditChecklist
from .check_award_ref_existence import (
    check_award_ref_existence,
    FEDERAL_AWARDS_TEMPLATE,
)
from .sac_validation_shape import sac_validation_shape
from .errors import err_missing_award_reference

from model_bakery import baker


class CheckAwardRefExistenceTest(TestCase):
    def setUp(self):
        """Set up the common variables for the test cases."""
        self.award1 = "AWARD-0001"
        self.award2 = None  # Missing award_reference
        self.award3 = "AWARD-2910"

    def _make_federal_awards(self, award_refs) -> dict:
        return {
            "FederalAwards": {
                "federal_awards": [
                    (
                        {
                            "program": {"federal_agency_prefix": "10"},
                            "award_reference": award_ref,
                        }
                        if award_ref
                        else {"program": {"federal_agency_prefix": "22"}}
                    )
                    for award_ref in award_refs
                ],
            }
        }

    def _make_sac(self, award_refs) -> SingleAuditChecklist:
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(award_refs)
        return sac

    def test_all_awards_have_references(self):
        """When all awards have a reference, no errors should be raised."""
        sac = self._make_sac([self.award1, self.award3])
        errors = check_award_ref_existence(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_missing_award_references_raise_errors(self):
        """When there are awards missing references, the appropriate errors should be raised."""
        sac = self._make_sac([self.award1, self.award2, self.award3])
        errors = check_award_ref_existence(sac_validation_shape(sac))

        self.assertEqual(len(errors), 1)

        first_row = FEDERAL_AWARDS_TEMPLATE["title_row"]
        row_num = first_row + 2  # 2 = index of award2 in the list of awards + 1

        expected_error = err_missing_award_reference(row_num)

        self.assertIn({"error": expected_error}, errors)
diff --git a/backend/audit/cross_validation/test_check_award_reference_uniqueness.py b/backend/audit/cross_validation/test_check_award_reference_uniqueness.py
index a7170b794c..37c463cdfe 100644
--- a/backend/audit/cross_validation/test_check_award_reference_uniqueness.py
+++ b/backend/audit/cross_validation/test_check_award_reference_uniqueness.py
@@ -1,48 +1,48 @@
from django.test import TestCase
from audit.models import SingleAuditChecklist
from .check_award_reference_uniqueness import check_award_reference_uniqueness
from .sac_validation_shape import sac_validation_shape
from .errors import err_duplicate_award_reference
from model_bakery import baker


class CheckAwardReferenceUniquenessTests(TestCase):
    def setUp(self):
        """Set up the common variables for the test cases."""
        self.award1 = {"award_reference": "AWARD-0001"}
        self.award2 = {"award_reference": "AWARD-2219"}
        self.award3 = {"award_reference": "AWARD-2910"}

    def _make_federal_awards(self, award_refs) -> dict:
        return {
            "FederalAwards": {
                "federal_awards": award_refs,
            }
        }

    def _make_sac(self, award_refs) -> SingleAuditChecklist:
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(award_refs)
        return sac

    def test_unique_award_references_raise_no_errors(self):
        """When all award references are unique, no errors should be raised."""
        sac = self._make_sac([self.award1, self.award2, self.award3])
        errors = check_award_reference_uniqueness(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_duplicate_award_references_raise_errors(self):
        """When there are duplicate award references, the appropriate errors should be raised."""
        sac = self._make_sac(
            [self.award1, self.award1, self.award3, self.award3, self.award3]
        )
        errors = check_award_reference_uniqueness(sac_validation_shape(sac))

        self.assertEqual(len(errors), 2)

        expected_error1 = err_duplicate_award_reference(self.award1["award_reference"])
        expected_error2 = err_duplicate_award_reference(self.award3["award_reference"])

        self.assertIn({"error": expected_error1}, errors)

        self.assertIn({"error": expected_error2}, errors)
diff --git a/backend/audit/cross_validation/test_check_certifying_contacts.py b/backend/audit/cross_validation/test_check_certifying_contacts.py
index 7dbe045d31..43830804bb 100644
--- a/backend/audit/cross_validation/test_check_certifying_contacts.py
+++ b/backend/audit/cross_validation/test_check_certifying_contacts.py
@@ -1,43 +1,43 @@
from django.test import TestCase
from audit.models import Access, SingleAuditChecklist
from .sac_validation_shape import sac_validation_shape
from .check_certifying_contacts import check_certifying_contacts
from .errors import err_certifying_contacts_should_not_match

from model_bakery import baker


class CheckCertifyingContactsTest(TestCase):
    def setUp(self):
        """Set up the common variables for the test cases."""
        self.email1 = "auditee@example.com"
        self.email2 = "auditor@example.com"
        self.email3 = "same@example.com"

    def _make_sac(self, auditee_email, auditor_email) -> SingleAuditChecklist:
        sac = baker.make(SingleAuditChecklist)
        baker.make(
            Access, sac=sac, role="certifying_auditee_contact", email=auditee_email
        )
        baker.make(
            Access, sac=sac, role="certifying_auditor_contact", email=auditor_email
        )

        return sac

    def test_diff_certifying_contacts(self):
        """When auditor and auditee emails are different, no errors should be raised."""
        sac = self._make_sac(self.email1, self.email2)
        errors = check_certifying_contacts(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_same_certifying_contacts_raise_errors(self):
        """When auditor and auditee emails are the same, the appropriate error should be raised."""
        sac = self._make_sac(self.email3, self.email3)
        errors = check_certifying_contacts(sac_validation_shape(sac))

        self.assertEqual(len(errors), 1)

        expected_error = err_certifying_contacts_should_not_match()

        self.assertIn({"error": expected_error}, errors)
diff --git a/backend/audit/cross_validation/test_check_finding_reference_uniqueness.py b/backend/audit/cross_validation/test_check_finding_reference_uniqueness.py
index d164e0b49d..2ef4d26a2c 100644
--- a/backend/audit/cross_validation/test_check_finding_reference_uniqueness.py
+++ b/backend/audit/cross_validation/test_check_finding_reference_uniqueness.py
@@ -1,135 +1,135 @@
from django.conf import settings
from django.test import TestCase
from audit.models import SingleAuditChecklist, SacValidationWaiver
from census_historical_migration.invalid_record import InvalidRecord
from .check_finding_reference_uniqueness import check_finding_reference_uniqueness
from .sac_validation_shape import sac_validation_shape
from .errors import err_duplicate_finding_reference
from .utils import generate_random_integer
from model_bakery import baker


class CheckFindingReferenceUniquenessTests(TestCase):
    AWARD_MIN = 1000
    AWARD_MAX = 2000
    REF_MIN = 100
    REF_MAX = 200

    def _award_reference(self) -> str:
        return f"AWARD-{generate_random_integer(self.AWARD_MIN, self.AWARD_MAX)}"

    def _reference_number(self, ref_num=None) -> str:
        return (
            f"2023-{generate_random_integer(self.REF_MIN, self.REF_MAX)}"
            if ref_num is None
            else f"2023-{ref_num}"
        )

    def _make_findings_uniform_guidance(
        self, award_references, reference_numbers
    ) -> dict:
        entries = []
        for award_ref, ref_nums in zip(award_references, reference_numbers):
            for ref_num in ref_nums:
                entries.append(
                    {
                        "program": {"award_reference": award_ref},
                        "findings": {"reference_number": ref_num},
                    }
                )

        return {
            "FindingsUniformGuidance": {
                "auditee_uei": "ABB123456CCC",
                "findings_uniform_guidance_entries": entries,
            }
        }

    def test_no_duplicate_references(self):
        """
        Check that no error is returned when there are no duplicate reference numbers.
        """
        range_size = generate_random_integer(2, 4)
        sac = baker.make(SingleAuditChecklist)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            [self._award_reference() for _ in range(range_size)],
            [[self._reference_number(self.REF_MIN + i)] for i in range(range_size)],
        )
        errors = check_finding_reference_uniqueness(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_duplicate_references_for_award(self):
        """
        Check that errors are returned for awards with duplicate reference numbers.
        """
        range_size = generate_random_integer(2, 4)
        sac = baker.make(SingleAuditChecklist)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            [self._award_reference() for _ in range(range_size)],
            [
                [
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MAX),
                ]
            ],
        )
        errors = check_finding_reference_uniqueness(sac_validation_shape(sac))
        for finding in sac.findings_uniform_guidance["FindingsUniformGuidance"][
            "findings_uniform_guidance_entries"
        ]:
            expected_error = {
                "error": err_duplicate_finding_reference(
                    finding["program"]["award_reference"],
                    self._reference_number(self.REF_MIN),
                )
            }
            self.assertIn(expected_error, errors)

    def test_duplicate_references_for_historical_award(self):
        """
        Check that errors are not returned for historical awards with duplicate reference numbers.
        """
        range_size = generate_random_integer(2, 4)
        sac = baker.make(SingleAuditChecklist)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            [self._award_reference() for _ in range(range_size)],
            [
                [
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MAX),
                ]
            ],
        )
        sac.data_source = settings.CENSUS_DATA_SOURCE
        InvalidRecord.reset()
        InvalidRecord.append_validations_to_skip("check_finding_reference_uniqueness")

        errors = check_finding_reference_uniqueness(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_duplicate_finding_reference_numbers_with_waivers(self):
        """
        Check that errors are not returned for report with duplicate reference numbers when the validation is waived.
        """
        range_size = generate_random_integer(2, 4)
        sac = baker.make(SingleAuditChecklist)
        baker.make(
            SacValidationWaiver,
            report_id=sac,
            waiver_types=[SacValidationWaiver.TYPES.FINDING_REFERENCE_NUMBER],
        )
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            [self._award_reference() for _ in range(range_size)],
            [
                [
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MIN),
                    self._reference_number(self.REF_MAX),
                ]
            ],
        )
        sac.waiver_types = [SacValidationWaiver.TYPES.FINDING_REFERENCE_NUMBER]
        errors = check_finding_reference_uniqueness(sac_validation_shape(sac))
        self.assertEqual(errors, [])
f"AWARD-{self.AWARD_MAX}", - }, - ] - } - } - - def _make_findings_uniform_guidance(self, awards, mismatch) -> dict: - entries = [] - for award in awards["FederalAwards"]["federal_awards"]: - award_reference = award["award_reference"] - count = award["program"]["number_of_audit_findings"] - for _ in range(count + mismatch): - entries.append({"program": {"award_reference": award_reference}}) - - findings = ( - { - "auditee_uei": "AAA123456BBB", - "findings_uniform_guidance_entries": entries, - } - if len(entries) > 0 - else {"auditee_uei": "AAA123456BBB"} - ) - - return {"FindingsUniformGuidance": findings} - - def _make_sac(self, findings_count, mismatch=0) -> SingleAuditChecklist: - sac = baker.make(SingleAuditChecklist) - sac.federal_awards = self._make_federal_awards(findings_count) - sac.findings_uniform_guidance = self._make_findings_uniform_guidance( - sac.federal_awards, mismatch - ) - return sac - - def test_zero_findings_count_report(self): - """Ensure no error is returned for consistent zero findings.""" - sac = self._make_sac(0) - errors = check_findings_count_consistency(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def test_findings_count_matches_across_workbooks(self): - """Ensure no error is returned for consistent findings count.""" - sac = self._make_sac( - generate_random_integer(self.FINDINGS_MIN, self.FINDINGS_MAX) - ) - errors = check_findings_count_consistency(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def _test_findings_count_mismatch(self, base_count, mismatch): - sac = self._make_sac(base_count, mismatch) - errors = check_findings_count_consistency(sac_validation_shape(sac)) - self.assertEqual( - len(errors), len(sac.federal_awards["FederalAwards"]["federal_awards"]) - ) - - for award in sac.federal_awards["FederalAwards"]["federal_awards"]: - award_reference = award["award_reference"] - expected_error = err_findings_count_inconsistent( - base_count, base_count + mismatch, award_reference - ) - self.assertIn({"error": expected_error}, errors) - - def test_reported_findings_exceed_declared_count(self): - """ - Expect errors when the number of findings in the Federal Awards Audit Findings workbook, - a.k.a the Findings Uniform Guidance workbook, exceeds those declared in the Federal Awards workbook. - """ - self._test_findings_count_mismatch( - generate_random_integer(2, 4), generate_random_integer(1, 2) - ) - - def test_declared_findings_exceed_reported_count(self): - """ - Expect errors when the number of findings in the Federal Awards workbook - exceeds those reported in the Federal Awards Audit Findings workbook. 
- """ - self._test_findings_count_mismatch( - generate_random_integer(2, 4), generate_random_integer(-2, -1) - ) +from django.test import TestCase +from audit.models import SingleAuditChecklist +from .errors import err_findings_count_inconsistent +from .check_findings_count_consistency import check_findings_count_consistency +from .sac_validation_shape import sac_validation_shape +from .utils import generate_random_integer +from model_bakery import baker + + +class CheckFindingsCountConsistencyTests(TestCase): + AWARD_MIN = 1000 + AWARD_MAX = 2000 + FINDINGS_MIN = 1 + FINDINGS_MAX = 5 + + def _make_federal_awards(self, findings_count) -> dict: + return { + "FederalAwards": { + "federal_awards": [ + { + "program": {"number_of_audit_findings": findings_count}, + "award_reference": f"AWARD-{self.AWARD_MIN}", + }, + { + "program": {"number_of_audit_findings": findings_count}, + "award_reference": f"AWARD-{self.AWARD_MAX}", + }, + ] + } + } + + def _make_findings_uniform_guidance(self, awards, mismatch) -> dict: + entries = [] + for award in awards["FederalAwards"]["federal_awards"]: + award_reference = award["award_reference"] + count = award["program"]["number_of_audit_findings"] + for _ in range(count + mismatch): + entries.append({"program": {"award_reference": award_reference}}) + + findings = ( + { + "auditee_uei": "AAA123456BBB", + "findings_uniform_guidance_entries": entries, + } + if len(entries) > 0 + else {"auditee_uei": "AAA123456BBB"} + ) + + return {"FindingsUniformGuidance": findings} + + def _make_sac(self, findings_count, mismatch=0) -> SingleAuditChecklist: + sac = baker.make(SingleAuditChecklist) + sac.federal_awards = self._make_federal_awards(findings_count) + sac.findings_uniform_guidance = self._make_findings_uniform_guidance( + sac.federal_awards, mismatch + ) + return sac + + def test_zero_findings_count_report(self): + """Ensure no error is returned for consistent zero findings.""" + sac = self._make_sac(0) + errors = check_findings_count_consistency(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def test_findings_count_matches_across_workbooks(self): + """Ensure no error is returned for consistent findings count.""" + sac = self._make_sac( + generate_random_integer(self.FINDINGS_MIN, self.FINDINGS_MAX) + ) + errors = check_findings_count_consistency(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def _test_findings_count_mismatch(self, base_count, mismatch): + sac = self._make_sac(base_count, mismatch) + errors = check_findings_count_consistency(sac_validation_shape(sac)) + self.assertEqual( + len(errors), len(sac.federal_awards["FederalAwards"]["federal_awards"]) + ) + + for award in sac.federal_awards["FederalAwards"]["federal_awards"]: + award_reference = award["award_reference"] + expected_error = err_findings_count_inconsistent( + base_count, base_count + mismatch, award_reference + ) + self.assertIn({"error": expected_error}, errors) + + def test_reported_findings_exceed_declared_count(self): + """ + Expect errors when the number of findings in the Federal Awards Audit Findings workbook, + a.k.a the Findings Uniform Guidance workbook, exceeds those declared in the Federal Awards workbook. + """ + self._test_findings_count_mismatch( + generate_random_integer(2, 4), generate_random_integer(1, 2) + ) + + def test_declared_findings_exceed_reported_count(self): + """ + Expect errors when the number of findings in the Federal Awards workbook + exceeds those reported in the Federal Awards Audit Findings workbook. 
+ """ + self._test_findings_count_mismatch( + generate_random_integer(2, 4), generate_random_integer(-2, -1) + ) diff --git a/backend/audit/cross_validation/test_check_ref_number_in_cap.py b/backend/audit/cross_validation/test_check_ref_number_in_cap.py index f7aecec9e1..08c98a778d 100644 --- a/backend/audit/cross_validation/test_check_ref_number_in_cap.py +++ b/backend/audit/cross_validation/test_check_ref_number_in_cap.py @@ -1,140 +1,140 @@ -from django.conf import settings -from census_historical_migration.invalid_record import InvalidRecord -from django.test import TestCase -from .utils import generate_random_integer, make_findings_uniform_guidance -from audit.models import SingleAuditChecklist -from .check_ref_number_in_cap import check_ref_number_in_cap -from .sac_validation_shape import sac_validation_shape -from .errors import err_missing_or_extra_references -from model_bakery import baker -from audit.fixtures.excel import ( - SECTION_NAMES, -) - - -class CheckRefNumberInCapTests(TestCase): - def setUp(self): - """Set up the common variables for the test cases.""" - self.AUDITEE_UEI = "AAA123456BBB" - self.reference_1 = "2023-100" - self.reference_2 = "2023-200" - self.reference_3 = "2023-300" - - def _make_cap(self, refs) -> dict: - entries = [] - for ref in refs: - entries.append({"reference_number": ref}) - caps = ( - { - "auditee_uei": self.AUDITEE_UEI, - "corrective_action_plan_entries": entries, - } - if len(entries) > 0 - else {"auditee_uei": self.AUDITEE_UEI} - ) - return {"CorrectiveActionPlan": caps} - - def _make_sac(self, findings_refs, findings_text_refs) -> SingleAuditChecklist: - sac = baker.make(SingleAuditChecklist) - sac.findings_uniform_guidance = make_findings_uniform_guidance( - findings_refs, self.AUDITEE_UEI - ) - sac.corrective_action_plan = self._make_cap(findings_text_refs) - - return sac - - def test_references_direct_match(self): - """When all references match directly, no errors should be raised.""" - sac = self._make_sac( - [self.reference_1, self.reference_2], [self.reference_1, self.reference_2] - ) - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def test_references_match_with_duplicates(self): - """When all references match but there are duplicates, no errors should be raised.""" - original_references = [self.reference_1, self.reference_2] - - duplicated_references = [ - ref - for ref in original_references - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac(original_references, duplicated_references) - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def test_missing_references(self): - """When there are missing references, an error should be raised.""" - duplicated_references = [ - ref - for ref in [self.reference_1] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac( - [self.reference_1, self.reference_2, self.reference_3], - duplicated_references, - ) - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - {self.reference_2, self.reference_3}, - None, - SECTION_NAMES.CORRECTIVE_ACTION_PLAN, - ) - self.assertIn({"error": expected_error}, errors) - - def test_extra_references(self): - """When there are extra references, an error should be raised.""" - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_2, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = 
self._make_sac([self.reference_1], duplicated_references) - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - None, - {self.reference_2, self.reference_3}, - SECTION_NAMES.CORRECTIVE_ACTION_PLAN, - ) - self.assertIn({"error": expected_error}, errors) - - def test_missing_and_extra_references(self): - """When there are missing and extra references, an error should be raised.""" - - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac( - [self.reference_1, self.reference_2], duplicated_references - ) - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - {self.reference_2}, {self.reference_3}, SECTION_NAMES.CORRECTIVE_ACTION_PLAN - ) - self.assertIn({"error": expected_error}, errors) - - def test_more_cap_references_less_findings_for_historical_captexts(self): - """When there are extra references, update InvalidRecord and do not raise error.""" - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_2, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac([], duplicated_references) - - sac.data_source = settings.CENSUS_DATA_SOURCE - InvalidRecord.reset() - InvalidRecord.append_validations_to_skip("check_ref_number_in_cap") - - errors = check_ref_number_in_cap(sac_validation_shape(sac)) - - self.assertEqual(errors, []) +from django.conf import settings +from census_historical_migration.invalid_record import InvalidRecord +from django.test import TestCase +from .utils import generate_random_integer, make_findings_uniform_guidance +from audit.models import SingleAuditChecklist +from .check_ref_number_in_cap import check_ref_number_in_cap +from .sac_validation_shape import sac_validation_shape +from .errors import err_missing_or_extra_references +from model_bakery import baker +from audit.fixtures.excel import ( + SECTION_NAMES, +) + + +class CheckRefNumberInCapTests(TestCase): + def setUp(self): + """Set up the common variables for the test cases.""" + self.AUDITEE_UEI = "AAA123456BBB" + self.reference_1 = "2023-100" + self.reference_2 = "2023-200" + self.reference_3 = "2023-300" + + def _make_cap(self, refs) -> dict: + entries = [] + for ref in refs: + entries.append({"reference_number": ref}) + caps = ( + { + "auditee_uei": self.AUDITEE_UEI, + "corrective_action_plan_entries": entries, + } + if len(entries) > 0 + else {"auditee_uei": self.AUDITEE_UEI} + ) + return {"CorrectiveActionPlan": caps} + + def _make_sac(self, findings_refs, findings_text_refs) -> SingleAuditChecklist: + sac = baker.make(SingleAuditChecklist) + sac.findings_uniform_guidance = make_findings_uniform_guidance( + findings_refs, self.AUDITEE_UEI + ) + sac.corrective_action_plan = self._make_cap(findings_text_refs) + + return sac + + def test_references_direct_match(self): + """When all references match directly, no errors should be raised.""" + sac = self._make_sac( + [self.reference_1, self.reference_2], [self.reference_1, self.reference_2] + ) + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def test_references_match_with_duplicates(self): + """When all references match but there are duplicates, no errors should be raised.""" + original_references = [self.reference_1, self.reference_2] + + duplicated_references = 
[ + ref + for ref in original_references + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac(original_references, duplicated_references) + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def test_missing_references(self): + """When there are missing references, an error should be raised.""" + duplicated_references = [ + ref + for ref in [self.reference_1] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac( + [self.reference_1, self.reference_2, self.reference_3], + duplicated_references, + ) + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + {self.reference_2, self.reference_3}, + None, + SECTION_NAMES.CORRECTIVE_ACTION_PLAN, + ) + self.assertIn({"error": expected_error}, errors) + + def test_extra_references(self): + """When there are extra references, an error should be raised.""" + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_2, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac([self.reference_1], duplicated_references) + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + None, + {self.reference_2, self.reference_3}, + SECTION_NAMES.CORRECTIVE_ACTION_PLAN, + ) + self.assertIn({"error": expected_error}, errors) + + def test_missing_and_extra_references(self): + """When there are missing and extra references, an error should be raised.""" + + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac( + [self.reference_1, self.reference_2], duplicated_references + ) + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + {self.reference_2}, {self.reference_3}, SECTION_NAMES.CORRECTIVE_ACTION_PLAN + ) + self.assertIn({"error": expected_error}, errors) + + def test_more_cap_references_less_findings_for_historical_captexts(self): + """When there are extra references, update InvalidRecord and do not raise error.""" + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_2, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac([], duplicated_references) + + sac.data_source = settings.CENSUS_DATA_SOURCE + InvalidRecord.reset() + InvalidRecord.append_validations_to_skip("check_ref_number_in_cap") + + errors = check_ref_number_in_cap(sac_validation_shape(sac)) + + self.assertEqual(errors, []) diff --git a/backend/audit/cross_validation/test_check_ref_number_in_findings_text.py b/backend/audit/cross_validation/test_check_ref_number_in_findings_text.py index 1e00671b81..890592b346 100644 --- a/backend/audit/cross_validation/test_check_ref_number_in_findings_text.py +++ b/backend/audit/cross_validation/test_check_ref_number_in_findings_text.py @@ -1,140 +1,140 @@ -from django.conf import settings -from census_historical_migration.invalid_record import InvalidRecord -from django.test import TestCase -from .utils import generate_random_integer, make_findings_uniform_guidance -from audit.models import SingleAuditChecklist -from .check_ref_number_in_findings_text import check_ref_number_in_findings_text -from .sac_validation_shape import sac_validation_shape -from .errors import 
err_missing_or_extra_references -from model_bakery import baker -from audit.fixtures.excel import ( - SECTION_NAMES, -) - - -class CheckRefNumberInFindingsTextTests(TestCase): - def setUp(self): - """Set up the common variables for the test cases.""" - self.AUDITEE_UEI = "AAA123456BBB" - self.reference_1 = "2023-001" - self.reference_2 = "2023-002" - self.reference_3 = "2023-003" - - def _make_findings_text(self, refs) -> dict: - entries = [] - for ref in refs: - entries.append({"reference_number": ref}) - findings_text = ( - { - "auditee_uei": self.AUDITEE_UEI, - "findings_text_entries": entries, - } - if len(entries) > 0 - else {"auditee_uei": self.AUDITEE_UEI} - ) - return {"FindingsText": findings_text} - - def _make_sac(self, findings_refs, findings_text_refs) -> SingleAuditChecklist: - sac = baker.make(SingleAuditChecklist) - sac.findings_uniform_guidance = make_findings_uniform_guidance( - findings_refs, self.AUDITEE_UEI - ) - sac.findings_text = self._make_findings_text(findings_text_refs) - - return sac - - def test_references_direct_match(self): - """When all references match directly, no errors should be raised.""" - sac = self._make_sac( - [self.reference_1, self.reference_2], [self.reference_1, self.reference_2] - ) - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def test_references_match_with_duplicates(self): - """When all references match but there are duplicates, no errors should be raised.""" - original_references = [self.reference_1, self.reference_2] - # Create a list with each reference duplicated n times, where n is generated randomly between 2 and 5 - duplicated_references = [ - ref - for ref in original_references - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac(original_references, duplicated_references) - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - self.assertEqual(errors, []) - - def test_missing_references(self): - """When there are missing references, an error should be raised.""" - duplicated_references = [ - ref - for ref in [self.reference_1] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac( - [self.reference_1, self.reference_2, self.reference_3], - duplicated_references, - ) - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - # Check if the error list mentions self.reference_2, self.reference_3 as missing from findings_text but present in findings_uniform_guidance - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - {self.reference_2, self.reference_3}, - None, - SECTION_NAMES.AUDIT_FINDINGS_TEXT, - ) - self.assertIn({"error": expected_error}, errors) - - def test_extra_references(self): - """When there are extra references, an error should be raised.""" - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_2, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac([self.reference_1], duplicated_references) - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - # Check if the error list mentions self.reference_2, self.reference_3 as extra from findings_text but present in findings_uniform_guidance - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - None, - {self.reference_2, self.reference_3}, - SECTION_NAMES.AUDIT_FINDINGS_TEXT, - ) - self.assertIn({"error": expected_error}, errors) - - def test_missing_and_extra_references(self): - """When 
there are missing and extra references, an error should be raised.""" - - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac( - [self.reference_1, self.reference_2], duplicated_references - ) - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - # Check if the error list mentions self.reference_2, self.reference_3 as extra from findings_text but present in findings_uniform_guidance - self.assertEqual(len(errors), 1) - expected_error = err_missing_or_extra_references( - {self.reference_2}, {self.reference_3}, SECTION_NAMES.AUDIT_FINDINGS_TEXT - ) - self.assertIn({"error": expected_error}, errors) - - def test_more_finding_references_less_findings_for_historical_findingstexts(self): - """When there are extra references, update InvalidRecord and do not raise error.""" - duplicated_references = [ - ref - for ref in [self.reference_1, self.reference_2, self.reference_3] - for _ in range(generate_random_integer(2, 5)) - ] - sac = self._make_sac([], duplicated_references) - - sac.data_source = settings.CENSUS_DATA_SOURCE - InvalidRecord.reset() - InvalidRecord.append_validations_to_skip("check_ref_number_in_findings_text") - - errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) - - self.assertEqual(errors, []) +from django.conf import settings +from census_historical_migration.invalid_record import InvalidRecord +from django.test import TestCase +from .utils import generate_random_integer, make_findings_uniform_guidance +from audit.models import SingleAuditChecklist +from .check_ref_number_in_findings_text import check_ref_number_in_findings_text +from .sac_validation_shape import sac_validation_shape +from .errors import err_missing_or_extra_references +from model_bakery import baker +from audit.fixtures.excel import ( + SECTION_NAMES, +) + + +class CheckRefNumberInFindingsTextTests(TestCase): + def setUp(self): + """Set up the common variables for the test cases.""" + self.AUDITEE_UEI = "AAA123456BBB" + self.reference_1 = "2023-001" + self.reference_2 = "2023-002" + self.reference_3 = "2023-003" + + def _make_findings_text(self, refs) -> dict: + entries = [] + for ref in refs: + entries.append({"reference_number": ref}) + findings_text = ( + { + "auditee_uei": self.AUDITEE_UEI, + "findings_text_entries": entries, + } + if len(entries) > 0 + else {"auditee_uei": self.AUDITEE_UEI} + ) + return {"FindingsText": findings_text} + + def _make_sac(self, findings_refs, findings_text_refs) -> SingleAuditChecklist: + sac = baker.make(SingleAuditChecklist) + sac.findings_uniform_guidance = make_findings_uniform_guidance( + findings_refs, self.AUDITEE_UEI + ) + sac.findings_text = self._make_findings_text(findings_text_refs) + + return sac + + def test_references_direct_match(self): + """When all references match directly, no errors should be raised.""" + sac = self._make_sac( + [self.reference_1, self.reference_2], [self.reference_1, self.reference_2] + ) + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def test_references_match_with_duplicates(self): + """When all references match but there are duplicates, no errors should be raised.""" + original_references = [self.reference_1, self.reference_2] + # Create a list with each reference duplicated n times, where n is generated randomly between 2 and 5 + duplicated_references = [ + ref + for ref in original_references + for _ in 
range(generate_random_integer(2, 5)) + ] + sac = self._make_sac(original_references, duplicated_references) + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + self.assertEqual(errors, []) + + def test_missing_references(self): + """When there are missing references, an error should be raised.""" + duplicated_references = [ + ref + for ref in [self.reference_1] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac( + [self.reference_1, self.reference_2, self.reference_3], + duplicated_references, + ) + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + # Check if the error list mentions self.reference_2, self.reference_3 as missing from findings_text but present in findings_uniform_guidance + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + {self.reference_2, self.reference_3}, + None, + SECTION_NAMES.AUDIT_FINDINGS_TEXT, + ) + self.assertIn({"error": expected_error}, errors) + + def test_extra_references(self): + """When there are extra references, an error should be raised.""" + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_2, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac([self.reference_1], duplicated_references) + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + # Check if the error list mentions self.reference_2, self.reference_3 as extra from findings_text but present in findings_uniform_guidance + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + None, + {self.reference_2, self.reference_3}, + SECTION_NAMES.AUDIT_FINDINGS_TEXT, + ) + self.assertIn({"error": expected_error}, errors) + + def test_missing_and_extra_references(self): + """When there are missing and extra references, an error should be raised.""" + + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac( + [self.reference_1, self.reference_2], duplicated_references + ) + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + # Check if the error list mentions self.reference_2, self.reference_3 as extra from findings_text but present in findings_uniform_guidance + self.assertEqual(len(errors), 1) + expected_error = err_missing_or_extra_references( + {self.reference_2}, {self.reference_3}, SECTION_NAMES.AUDIT_FINDINGS_TEXT + ) + self.assertIn({"error": expected_error}, errors) + + def test_more_finding_references_less_findings_for_historical_findingstexts(self): + """When there are extra references, update InvalidRecord and do not raise error.""" + duplicated_references = [ + ref + for ref in [self.reference_1, self.reference_2, self.reference_3] + for _ in range(generate_random_integer(2, 5)) + ] + sac = self._make_sac([], duplicated_references) + + sac.data_source = settings.CENSUS_DATA_SOURCE + InvalidRecord.reset() + InvalidRecord.append_validations_to_skip("check_ref_number_in_findings_text") + + errors = check_ref_number_in_findings_text(sac_validation_shape(sac)) + + self.assertEqual(errors, []) diff --git a/backend/audit/cross_validation/utils.py b/backend/audit/cross_validation/utils.py index 5cf887e2d1..0eeb1f2ba5 100644 --- a/backend/audit/cross_validation/utils.py +++ b/backend/audit/cross_validation/utils.py @@ -1,28 +1,28 @@ -import random - - -def generate_random_integer(min, max): - """Generate a random integer between min and max.""" - return 
random.randint(min, max) # nosec - - -def format_refs(ref_set: set) -> str: - """Format a set of references as a comma-separated string.""" - return ", ".join(sorted(ref_set)) - - -def make_findings_uniform_guidance(refs, auditee_uei) -> dict: - entries = [] - for ref in refs: - entries.append({"findings": {"reference_number": ref}}) - - findings = ( - { - "auditee_uei": auditee_uei, - "findings_uniform_guidance_entries": entries, - } - if len(entries) > 0 - else {"auditee_uei": auditee_uei} - ) - - return {"FindingsUniformGuidance": findings} +import random + + +def generate_random_integer(min, max): + """Generate a random integer between min and max.""" + return random.randint(min, max) # nosec + + +def format_refs(ref_set: set) -> str: + """Format a set of references as a comma-separated string.""" + return ", ".join(sorted(ref_set)) + + +def make_findings_uniform_guidance(refs, auditee_uei) -> dict: + entries = [] + for ref in refs: + entries.append({"findings": {"reference_number": ref}}) + + findings = ( + { + "auditee_uei": auditee_uei, + "findings_uniform_guidance_entries": entries, + } + if len(entries) > 0 + else {"auditee_uei": auditee_uei} + ) + + return {"FindingsUniformGuidance": findings} diff --git a/backend/audit/intakelib/transforms/xform_add_transform_for_cfda_key.py b/backend/audit/intakelib/transforms/xform_add_transform_for_cfda_key.py index 1439aaa7fd..d667ee82d9 100644 --- a/backend/audit/intakelib/transforms/xform_add_transform_for_cfda_key.py +++ b/backend/audit/intakelib/transforms/xform_add_transform_for_cfda_key.py @@ -1,21 +1,21 @@ -import logging -from audit.intakelib.intermediate_representation import ( - get_range_by_name, - replace_range_by_name, -) - -logger = logging.getLogger(__name__) - - -def generate_cfda_keys(ir): - cfda_keys = [] - federal_agency_prefixes = get_range_by_name(ir, "federal_agency_prefix") - three_digit_extensions = get_range_by_name(ir, "three_digit_extension") - for prefix, extension in zip( - federal_agency_prefixes["values"], three_digit_extensions["values"] - ): - cfda_keys.append(f"{prefix}.{extension}" if prefix and extension else "") - - xform_ir = replace_range_by_name(ir, "cfda_key", cfda_keys) - - return xform_ir +import logging +from audit.intakelib.intermediate_representation import ( + get_range_by_name, + replace_range_by_name, +) + +logger = logging.getLogger(__name__) + + +def generate_cfda_keys(ir): + cfda_keys = [] + federal_agency_prefixes = get_range_by_name(ir, "federal_agency_prefix") + three_digit_extensions = get_range_by_name(ir, "three_digit_extension") + for prefix, extension in zip( + federal_agency_prefixes["values"], three_digit_extensions["values"] + ): + cfda_keys.append(f"{prefix}.{extension}" if prefix and extension else "") + + xform_ir = replace_range_by_name(ir, "cfda_key", cfda_keys) + + return xform_ir diff --git a/backend/audit/test_admin.py b/backend/audit/test_admin.py index f72c81ba17..43a36cd7a2 100644 --- a/backend/audit/test_admin.py +++ b/backend/audit/test_admin.py @@ -1,170 +1,170 @@ -from django.test import TestCase, RequestFactory -from django.contrib.admin.sites import AdminSite -from django.contrib.auth.models import User -from django.contrib.messages.storage.fallback import FallbackStorage -from .models import SacValidationWaiver, SingleAuditChecklist -from .admin import SacValidationWaiverAdmin -from django.utils import timezone -from model_bakery import baker -from django.contrib.sessions.middleware import SessionMiddleware -from django.contrib.messages.middleware import 
MessageMiddleware - - -class MockRequest: - def __init__(self, user): - self.user = user - self.session = {} - self._messages = FallbackStorage(self) - - -class TestSacValidationWaiverAdmin(TestCase): - - def setUp(self): - - self.user = User.objects.create_user( - username="testuser", password="12345" - ) # nosec - # Create a SingleAuditChecklist instance - self.sac = baker.make( - SingleAuditChecklist, - submission_status=SingleAuditChecklist.STATUS.READY_FOR_CERTIFICATION, - ) - - # Create a request object - self.factory = RequestFactory() - self.request = self.factory.post("/admin/audit/sacvalidationwaiver/add/") - self.request.user = self.user - - # Add session and message middleware to the request - self.middleware_process(self.request) - - # Set up the Admin site and Admin class - self.site = AdminSite() - self.admin = SacValidationWaiverAdmin(SacValidationWaiver, self.site) - - def middleware_process(self, request): - """Apply middleware to the request object""" - # Create and apply session middleware - session_middleware = SessionMiddleware(lambda req: None) - session_middleware.process_request(request) - request.session.save() - # Create and apply message middleware - message_middleware = MessageMiddleware(lambda req: None) - message_middleware.process_request(request) - - def test_save_model_auditor_certification(self): - """Test the save_model method of the SacValidationWaiverAdmin class when the waiver is for auditor certification""" - # Create a SacValidationWaiver instance - waiver = baker.make( - SacValidationWaiver, - report_id=self.sac, - timestamp=timezone.now(), - approver_email="approver@example.com", - approver_name="Approver Name", - requester_email="requester@example.com", - requester_name="Requester Name", - justification="Test justification", - waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], - ) - - form = SacValidationWaiverAdmin.form(instance=waiver) - self.admin.save_model(self.request, waiver, form, change=False) - - # Checking results - self.sac.refresh_from_db() - self.assertEqual( - self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED - ) - - def test_save_model_auditee_certification(self): - """Test the save_model method of the SacValidationWaiverAdmin class when the waiver is for auditee certification""" - # Set the SAC status to AUDITOR_CERTIFIED - self.sac.submission_status = SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED - self.sac.save() - - # Create a SacValidationWaiver instance - waiver = baker.make( - SacValidationWaiver, - report_id=self.sac, - timestamp=timezone.now(), - approver_email="approver@example.com", - approver_name="Approver Name", - requester_email="requester@example.com", - requester_name="Requester Name", - justification="Test justification", - waiver_types=[SacValidationWaiver.TYPES.AUDITEE_CERTIFYING_OFFICIAL], - ) - - form = SacValidationWaiverAdmin.form(instance=waiver) - self.admin.save_model(self.request, waiver, form, change=False) - - # Checking results - self.sac.refresh_from_db() - self.assertEqual( - self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITEE_CERTIFIED - ) - - def test_save_model_invalid_status(self): - # Set SAC status to an invalid one - self.sac.submission_status = "INVALID_STATUS" - self.sac.save() - - # Create a SacValidationWaiver instance - waiver = baker.make( - SacValidationWaiver, - report_id=self.sac, - timestamp=timezone.now(), - approver_email="approver@example.com", - approver_name="Approver Name", - requester_email="requester@example.com", - 
requester_name="Requester Name", - justification="Test justification", - waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], - ) - - form = SacValidationWaiverAdmin.form(instance=waiver) - self.admin.save_model(self.request, waiver, form, change=False) - - # Check if the expected error message was added - messages = list(self.request._messages) - self.assertEqual(len(messages), 1) - self.assertIn("Cannot apply waiver to SAC with status", messages[0].message) - - def test_handle_auditor_certification(self): - """Test the handle_auditor_certification method of the SacValidation""" - # Simulating auditor certification - waiver = baker.make( - SacValidationWaiver, - report_id=self.sac, - timestamp=timezone.now(), - waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], - ) - self.admin.handle_auditor_certification(self.request, waiver, self.sac) - - # Checking results - self.sac.refresh_from_db() - self.assertEqual( - self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED - ) - - def test_handle_auditee_certification(self): - """Test the handle_auditee_certification method of the SacValidationWaiverAdmin class.""" - - # Set SAC status to AUDITOR_CERTIFIED - self.sac.submission_status = SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED - self.sac.save() - - # Simulating auditee certification - waiver = baker.make( - SacValidationWaiver, - report_id=self.sac, - timestamp=timezone.now(), - waiver_types=[SacValidationWaiver.TYPES.AUDITEE_CERTIFYING_OFFICIAL], - ) - self.admin.handle_auditee_certification(self.request, waiver, self.sac) - - # Checking results - self.sac.refresh_from_db() - self.assertEqual( - self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITEE_CERTIFIED - ) +from django.test import TestCase, RequestFactory +from django.contrib.admin.sites import AdminSite +from django.contrib.auth.models import User +from django.contrib.messages.storage.fallback import FallbackStorage +from .models import SacValidationWaiver, SingleAuditChecklist +from .admin import SacValidationWaiverAdmin +from django.utils import timezone +from model_bakery import baker +from django.contrib.sessions.middleware import SessionMiddleware +from django.contrib.messages.middleware import MessageMiddleware + + +class MockRequest: + def __init__(self, user): + self.user = user + self.session = {} + self._messages = FallbackStorage(self) + + +class TestSacValidationWaiverAdmin(TestCase): + + def setUp(self): + + self.user = User.objects.create_user( + username="testuser", password="12345" + ) # nosec + # Create a SingleAuditChecklist instance + self.sac = baker.make( + SingleAuditChecklist, + submission_status=SingleAuditChecklist.STATUS.READY_FOR_CERTIFICATION, + ) + + # Create a request object + self.factory = RequestFactory() + self.request = self.factory.post("/admin/audit/sacvalidationwaiver/add/") + self.request.user = self.user + + # Add session and message middleware to the request + self.middleware_process(self.request) + + # Set up the Admin site and Admin class + self.site = AdminSite() + self.admin = SacValidationWaiverAdmin(SacValidationWaiver, self.site) + + def middleware_process(self, request): + """Apply middleware to the request object""" + # Create and apply session middleware + session_middleware = SessionMiddleware(lambda req: None) + session_middleware.process_request(request) + request.session.save() + # Create and apply message middleware + message_middleware = MessageMiddleware(lambda req: None) + 
message_middleware.process_request(request) + + def test_save_model_auditor_certification(self): + """Test the save_model method of the SacValidationWaiverAdmin class when the waiver is for auditor certification""" + # Create a SacValidationWaiver instance + waiver = baker.make( + SacValidationWaiver, + report_id=self.sac, + timestamp=timezone.now(), + approver_email="approver@example.com", + approver_name="Approver Name", + requester_email="requester@example.com", + requester_name="Requester Name", + justification="Test justification", + waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], + ) + + form = SacValidationWaiverAdmin.form(instance=waiver) + self.admin.save_model(self.request, waiver, form, change=False) + + # Checking results + self.sac.refresh_from_db() + self.assertEqual( + self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED + ) + + def test_save_model_auditee_certification(self): + """Test the save_model method of the SacValidationWaiverAdmin class when the waiver is for auditee certification""" + # Set the SAC status to AUDITOR_CERTIFIED + self.sac.submission_status = SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED + self.sac.save() + + # Create a SacValidationWaiver instance + waiver = baker.make( + SacValidationWaiver, + report_id=self.sac, + timestamp=timezone.now(), + approver_email="approver@example.com", + approver_name="Approver Name", + requester_email="requester@example.com", + requester_name="Requester Name", + justification="Test justification", + waiver_types=[SacValidationWaiver.TYPES.AUDITEE_CERTIFYING_OFFICIAL], + ) + + form = SacValidationWaiverAdmin.form(instance=waiver) + self.admin.save_model(self.request, waiver, form, change=False) + + # Checking results + self.sac.refresh_from_db() + self.assertEqual( + self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITEE_CERTIFIED + ) + + def test_save_model_invalid_status(self): + # Set SAC status to an invalid one + self.sac.submission_status = "INVALID_STATUS" + self.sac.save() + + # Create a SacValidationWaiver instance + waiver = baker.make( + SacValidationWaiver, + report_id=self.sac, + timestamp=timezone.now(), + approver_email="approver@example.com", + approver_name="Approver Name", + requester_email="requester@example.com", + requester_name="Requester Name", + justification="Test justification", + waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], + ) + + form = SacValidationWaiverAdmin.form(instance=waiver) + self.admin.save_model(self.request, waiver, form, change=False) + + # Check if the expected error message was added + messages = list(self.request._messages) + self.assertEqual(len(messages), 1) + self.assertIn("Cannot apply waiver to SAC with status", messages[0].message) + + def test_handle_auditor_certification(self): + """Test the handle_auditor_certification method of the SacValidation""" + # Simulating auditor certification + waiver = baker.make( + SacValidationWaiver, + report_id=self.sac, + timestamp=timezone.now(), + waiver_types=[SacValidationWaiver.TYPES.AUDITOR_CERTIFYING_OFFICIAL], + ) + self.admin.handle_auditor_certification(self.request, waiver, self.sac) + + # Checking results + self.sac.refresh_from_db() + self.assertEqual( + self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED + ) + + def test_handle_auditee_certification(self): + """Test the handle_auditee_certification method of the SacValidationWaiverAdmin class.""" + + # Set SAC status to AUDITOR_CERTIFIED + self.sac.submission_status = 
SingleAuditChecklist.STATUS.AUDITOR_CERTIFIED + self.sac.save() + + # Simulating auditee certification + waiver = baker.make( + SacValidationWaiver, + report_id=self.sac, + timestamp=timezone.now(), + waiver_types=[SacValidationWaiver.TYPES.AUDITEE_CERTIFYING_OFFICIAL], + ) + self.admin.handle_auditee_certification(self.request, waiver, self.sac) + + # Checking results + self.sac.refresh_from_db() + self.assertEqual( + self.sac.submission_status, SingleAuditChecklist.STATUS.AUDITEE_CERTIFIED + ) diff --git a/backend/audit/test_utils.py b/backend/audit/test_utils.py index 5a36f6b42e..604829001d 100644 --- a/backend/audit/test_utils.py +++ b/backend/audit/test_utils.py @@ -1,38 +1,38 @@ -from django.test import SimpleTestCase -from .utils import Util - - -class TestRemoveExtraFields(SimpleTestCase): - def setUp(self): - self.data = { - "auditor_country": "USA", - "auditor_address_line_1": "123 Main St", - "auditor_city": "Anytown", - "auditor_state": "CA", - "auditor_zip": "90210", - "auditor_international_address": "", - "audit_period_covered": "biennial", - "audit_period_other_months": "", - } - - def test_remove_international_address_for_usa_without_address(self): - result = Util.remove_extra_fields(self.data) - self.assertNotIn("auditor_international_address", result) - - def test_remove_usa_address_fields_for_non_usa_without_usa_address_fields(self): - # Non provided fields are returned as empty strings from the form - self.data["auditor_address_line_1"] = "" - self.data["auditor_city"] = "" - self.data["auditor_state"] = "" - self.data["auditor_zip"] = "" - self.data["auditor_country"] = "non-USA" - self.data["auditor_international_address"] = "123 International St" - result = Util.remove_extra_fields(self.data) - self.assertNotIn("auditor_zip", result) - self.assertNotIn("auditor_state", result) - self.assertNotIn("auditor_address_line_1", result) - self.assertNotIn("auditor_city", result) - - def test_remove_audit_period_other_months_when_not_other(self): - result = Util.remove_extra_fields(self.data) - self.assertNotIn("audit_period_other_months", result) +from django.test import SimpleTestCase +from .utils import Util + + +class TestRemoveExtraFields(SimpleTestCase): + def setUp(self): + self.data = { + "auditor_country": "USA", + "auditor_address_line_1": "123 Main St", + "auditor_city": "Anytown", + "auditor_state": "CA", + "auditor_zip": "90210", + "auditor_international_address": "", + "audit_period_covered": "biennial", + "audit_period_other_months": "", + } + + def test_remove_international_address_for_usa_without_address(self): + result = Util.remove_extra_fields(self.data) + self.assertNotIn("auditor_international_address", result) + + def test_remove_usa_address_fields_for_non_usa_without_usa_address_fields(self): + # Non provided fields are returned as empty strings from the form + self.data["auditor_address_line_1"] = "" + self.data["auditor_city"] = "" + self.data["auditor_state"] = "" + self.data["auditor_zip"] = "" + self.data["auditor_country"] = "non-USA" + self.data["auditor_international_address"] = "123 International St" + result = Util.remove_extra_fields(self.data) + self.assertNotIn("auditor_zip", result) + self.assertNotIn("auditor_state", result) + self.assertNotIn("auditor_address_line_1", result) + self.assertNotIn("auditor_city", result) + + def test_remove_audit_period_other_months_when_not_other(self): + result = Util.remove_extra_fields(self.data) + self.assertNotIn("audit_period_other_months", result) diff --git 
a/backend/census_historical_migration/admin.py b/backend/census_historical_migration/admin.py index d8ad4c682f..5d930d3ee7 100644 --- a/backend/census_historical_migration/admin.py +++ b/backend/census_historical_migration/admin.py @@ -1,29 +1,29 @@ -from django.contrib import admin - -from .models import ( - ELECAUDITHEADER, - ELECEINS, - ELECAUDITFINDINGS, - ELECNOTES, - ELECFINDINGSTEXT, - ELECCPAS, - ELECAUDITS, - ELECPASSTHROUGH, - ELECUEIS, - ELECCAPTEXT, - ReportMigrationStatus, - MigrationErrorDetail, -) - -admin.site.register(ELECAUDITHEADER) -admin.site.register(ELECEINS) -admin.site.register(ELECAUDITFINDINGS) -admin.site.register(ELECNOTES) -admin.site.register(ELECFINDINGSTEXT) -admin.site.register(ELECCPAS) -admin.site.register(ELECAUDITS) -admin.site.register(ELECPASSTHROUGH) -admin.site.register(ELECUEIS) -admin.site.register(ELECCAPTEXT) -admin.site.register(ReportMigrationStatus) -admin.site.register(MigrationErrorDetail) +from django.contrib import admin + +from .models import ( + ELECAUDITHEADER, + ELECEINS, + ELECAUDITFINDINGS, + ELECNOTES, + ELECFINDINGSTEXT, + ELECCPAS, + ELECAUDITS, + ELECPASSTHROUGH, + ELECUEIS, + ELECCAPTEXT, + ReportMigrationStatus, + MigrationErrorDetail, +) + +admin.site.register(ELECAUDITHEADER) +admin.site.register(ELECEINS) +admin.site.register(ELECAUDITFINDINGS) +admin.site.register(ELECNOTES) +admin.site.register(ELECFINDINGSTEXT) +admin.site.register(ELECCPAS) +admin.site.register(ELECAUDITS) +admin.site.register(ELECPASSTHROUGH) +admin.site.register(ELECUEIS) +admin.site.register(ELECCAPTEXT) +admin.site.register(ReportMigrationStatus) +admin.site.register(MigrationErrorDetail) diff --git a/backend/census_historical_migration/apps.py b/backend/census_historical_migration/apps.py index b36afe4117..9cbf332c3c 100644 --- a/backend/census_historical_migration/apps.py +++ b/backend/census_historical_migration/apps.py @@ -1,6 +1,6 @@ -from django.apps import AppConfig - - -class CensusHistoricalMigrationConfig(AppConfig): - default_auto_field = "django.db.models.BigAutoField" - name = "census_historical_migration" +from django.apps import AppConfig + + +class CensusHistoricalMigrationConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "census_historical_migration" diff --git a/backend/census_historical_migration/base_field_maps.py b/backend/census_historical_migration/base_field_maps.py index 66488cc030..be21c333c1 100644 --- a/backend/census_historical_migration/base_field_maps.py +++ b/backend/census_historical_migration/base_field_maps.py @@ -1,30 +1,30 @@ -FormFieldInDissem = WorkbookFieldInDissem = 1000 - - -class BaseFieldMap: - def __init__(self, in_db, in_dissem, default, type): - self.in_db = in_db - self.in_dissem = in_dissem - self.default = default - self.type = type - - -# This provides a way to map the sheet in the workbook to the -# column in the DB. It also has a default value and -# the type of value, so that things can be set correctly -# before filling in the XLSX workbooks. -# Define a new named tuple with an additional field -class SheetFieldMap(BaseFieldMap): - def __init__(self, in_sheet, *args, **kwargs): - super().__init__(*args, **kwargs) - self.in_sheet = in_sheet - - -# This provides a way to map fields in the form to the -# column in the DB. It also has a default value and -# the type of value, so that things can be set correctly -# before creating an initial sac. 
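Reviewer note: each field map pairs a DB column (in_db) with a workbook or form destination, a default value, and a type callable. One plausible way a consumer such as create_json_from_db_object could walk a list of FormFieldMap entries is sketched below; apply_form_field_maps is an illustration only, not the project's actual implementation.

    def apply_form_field_maps(db_obj, mappings) -> dict:
        # Illustrative only: read each mapped DB column off the object,
        # fall back to the declared default, and coerce via the type callable.
        result = {}
        for m in mappings:
            raw = getattr(db_obj, m.in_db, None) if m.in_db else None
            if raw in (None, ""):
                result[m.in_form] = m.default
            else:
                result[m.in_form] = m.type(raw)
        return result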
-class FormFieldMap(BaseFieldMap): - def __init__(self, in_form, *args, **kwargs): - super().__init__(*args, **kwargs) - self.in_form = in_form +FormFieldInDissem = WorkbookFieldInDissem = 1000 + + +class BaseFieldMap: + def __init__(self, in_db, in_dissem, default, type): + self.in_db = in_db + self.in_dissem = in_dissem + self.default = default + self.type = type + + +# This provides a way to map the sheet in the workbook to the +# column in the DB. It also has a default value and +# the type of value, so that things can be set correctly +# before filling in the XLSX workbooks. +# Define a new named tuple with an additional field +class SheetFieldMap(BaseFieldMap): + def __init__(self, in_sheet, *args, **kwargs): + super().__init__(*args, **kwargs) + self.in_sheet = in_sheet + + +# This provides a way to map fields in the form to the +# column in the DB. It also has a default value and +# the type of value, so that things can be set correctly +# before creating an initial sac. +class FormFieldMap(BaseFieldMap): + def __init__(self, in_form, *args, **kwargs): + super().__init__(*args, **kwargs) + self.in_form = in_form diff --git a/backend/census_historical_migration/exception_utils.py b/backend/census_historical_migration/exception_utils.py index 9c29c460d3..2554b7e518 100644 --- a/backend/census_historical_migration/exception_utils.py +++ b/backend/census_historical_migration/exception_utils.py @@ -1,19 +1,19 @@ -class DataMigrationError(Exception): - """Exception raised for errors that occur during historic data migration.""" - - def __init__( - self, - message="An error occurred during historic data migration", - tag="", - ): - self.message = message - self.tag = tag - super().__init__(self.message) - - -class DataMigrationValueError(DataMigrationError): - """Exception raised for value errors that occur during historic data migration.""" - - -class CrossValidationError(DataMigrationError): - """Exception raised for value errors that occur during historic data migration.""" +class DataMigrationError(Exception): + """Exception raised for errors that occur during historic data migration.""" + + def __init__( + self, + message="An error occurred during historic data migration", + tag="", + ): + self.message = message + self.tag = tag + super().__init__(self.message) + + +class DataMigrationValueError(DataMigrationError): + """Exception raised for value errors that occur during historic data migration.""" + + +class CrossValidationError(DataMigrationError): + """Exception raised for value errors that occur during historic data migration.""" diff --git a/backend/census_historical_migration/invalid_migration_tags.py b/backend/census_historical_migration/invalid_migration_tags.py index c00e557e80..b8598d5765 100644 --- a/backend/census_historical_migration/invalid_migration_tags.py +++ b/backend/census_historical_migration/invalid_migration_tags.py @@ -1,15 +1,15 @@ -class INVALID_MIGRATION_TAGS: - """Constants for invalid migration tags.""" - - INVALID_FEDERAL_PROGRAM_TOTAL = "invalid_federal_program_total" - DUPLICATE_FINDING_REFERENCE_NUMBERS = "duplicate_finding_reference_numbers" - INCORRECT_CLUSTER_TOTAL = "incorrect_cluster_total" - EXTRA_FINDING_REFERENCE_NUMBERS_IN_CAPTEXT = ( - "extra_finding_reference_numbers_in_captext" - ) - EXTRA_FINDING_REFERENCE_NUMBERS_IN_FINDINGSTEXT = ( - "extra_finding_reference_numbers_in_findingstext" - ) - INCORRECT_FINDINGS_COUNT = "incorrect_findings_count" - - ACE_AUDIT_REPORT = "ace_audit_report" +class INVALID_MIGRATION_TAGS: + """Constants for 
invalid migration tags.""" + + INVALID_FEDERAL_PROGRAM_TOTAL = "invalid_federal_program_total" + DUPLICATE_FINDING_REFERENCE_NUMBERS = "duplicate_finding_reference_numbers" + INCORRECT_CLUSTER_TOTAL = "incorrect_cluster_total" + EXTRA_FINDING_REFERENCE_NUMBERS_IN_CAPTEXT = ( + "extra_finding_reference_numbers_in_captext" + ) + EXTRA_FINDING_REFERENCE_NUMBERS_IN_FINDINGSTEXT = ( + "extra_finding_reference_numbers_in_findingstext" + ) + INCORRECT_FINDINGS_COUNT = "incorrect_findings_count" + + ACE_AUDIT_REPORT = "ace_audit_report" diff --git a/backend/census_historical_migration/management/commands/reprocess_failed_migration.py b/backend/census_historical_migration/management/commands/reprocess_failed_migration.py index 4791c29ba8..547bded9c5 100644 --- a/backend/census_historical_migration/management/commands/reprocess_failed_migration.py +++ b/backend/census_historical_migration/management/commands/reprocess_failed_migration.py @@ -1,47 +1,47 @@ -from census_historical_migration.process_failed_migration import ( - reprocess_failed_reports, -) -from census_historical_migration.sac_general_lib.utils import ( - normalize_year_string_or_exit, -) - -from django.core.management.base import BaseCommand - -import logging -import sys - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.WARNING) - - -class Command(BaseCommand): - help = """ - Reprocess failed migration reports for a given year and error tag using pagination - Usage: - manage.py run_migration - --year - --page_size - --pages - --error_tag - """ - - def add_arguments(self, parser): - parser.add_argument("--year", help="Audit Year") - parser.add_argument("--page_size", help="Number of records by page", type=int) - parser.add_argument("--pages", help="comma separated pages", type=str) - parser.add_argument("--error_tag", help="error tag", type=str) - - def handle(self, *args, **options): - year = normalize_year_string_or_exit(options.get("year")) - - try: - pages_str = options["pages"] - pages = list(map(lambda d: int(d), pages_str.split(","))) - except ValueError: - logger.error(f"Found a non-integer in pages '{pages_str}'") - sys.exit(-1) - - reprocess_failed_reports( - year, options["page_size"], pages, options["error_tag"] - ) +from census_historical_migration.process_failed_migration import ( + reprocess_failed_reports, +) +from census_historical_migration.sac_general_lib.utils import ( + normalize_year_string_or_exit, +) + +from django.core.management.base import BaseCommand + +import logging +import sys + + +logger = logging.getLogger(__name__) +logger.setLevel(logging.WARNING) + + +class Command(BaseCommand): + help = """ + Reprocess failed migration reports for a given year and error tag using pagination + Usage: + manage.py run_migration + --year + --page_size + --pages + --error_tag + """ + + def add_arguments(self, parser): + parser.add_argument("--year", help="Audit Year") + parser.add_argument("--page_size", help="Number of records by page", type=int) + parser.add_argument("--pages", help="comma separated pages", type=str) + parser.add_argument("--error_tag", help="error tag", type=str) + + def handle(self, *args, **options): + year = normalize_year_string_or_exit(options.get("year")) + + try: + pages_str = options["pages"] + pages = list(map(lambda d: int(d), pages_str.split(","))) + except ValueError: + logger.error(f"Found a non-integer in pages '{pages_str}'") + sys.exit(-1) + + reprocess_failed_reports( + year, options["page_size"], pages, options["error_tag"] + ) diff --git 
a/backend/census_historical_migration/sac_general_lib/audit_information.py b/backend/census_historical_migration/sac_general_lib/audit_information.py index 44c1f50180..57652e6de4 100644 --- a/backend/census_historical_migration/sac_general_lib/audit_information.py +++ b/backend/census_historical_migration/sac_general_lib/audit_information.py @@ -1,394 +1,394 @@ -import re - -from django.conf import settings - -from census_historical_migration.invalid_migration_tags import INVALID_MIGRATION_TAGS -from census_historical_migration.invalid_record import InvalidRecord -from census_historical_migration.report_type_flag import AceFlag -from ..transforms.xform_string_to_int import string_to_int -from ..transforms.xform_string_to_bool import string_to_bool -from ..transforms.xform_string_to_string import string_to_string -from ..exception_utils import DataMigrationError -from ..workbooklib.excel_creation_utils import get_audits, track_invalid_records -from ..base_field_maps import FormFieldMap, FormFieldInDissem -from ..sac_general_lib.utils import ( - create_json_from_db_object, - is_single_word, -) -import audit.validators -from ..change_record import InspectionRecord, CensusRecord, GsaFacRecord -from audit.utils import Util - - -def xform_apply_default_thresholds(value): - """Applies default threshold when value is None.""" - # Transformation to be documented. - str_value = string_to_string(value) - if str_value == "": - return settings.GSA_MIGRATION_INT - return string_to_int(str_value) - - -mappings = [ - FormFieldMap( - "dollar_threshold", - "DOLLARTHRESHOLD", - FormFieldInDissem, - None, - xform_apply_default_thresholds, - ), - FormFieldMap( - "is_going_concern_included", "GOINGCONCERN", FormFieldInDissem, None, bool - ), - FormFieldMap( - "is_internal_control_deficiency_disclosed", - "MATERIALWEAKNESS", - FormFieldInDissem, - None, - bool, - ), - FormFieldMap( - "is_internal_control_material_weakness_disclosed", - "MATERIALWEAKNESS_MP", - FormFieldInDissem, - None, - bool, - ), - FormFieldMap( - "is_material_noncompliance_disclosed", - "MATERIALNONCOMPLIANCE", - FormFieldInDissem, - None, - bool, - ), - FormFieldMap( - "is_aicpa_audit_guide_included", - "REPORTABLECONDITION", - FormFieldInDissem, - None, - bool, - ), - FormFieldMap("is_low_risk_auditee", "LOWRISK", FormFieldInDissem, None, bool), - FormFieldMap("agencies", "PYSCHEDULE", "agencies_with_prior_findings", [], list), -] - - -def _get_agency_prefixes(dbkey, year): - """Returns the agency prefixes for a given dbkey and audit year.""" - agencies = set() - audits = get_audits(dbkey, year) - - for audit_detail in audits: - prefix = ( - string_to_string(audit_detail.CFDA_PREFIX) - if audit_detail.CFDA_PREFIX - else string_to_string(audit_detail.CFDA).split(".")[0] - ) - agencies.add(prefix) - - return agencies - - -def xform_framework_basis(basis): - """Transforms the framework basis from Census format to FAC format. - For context, see ticket #2912. - """ - # Transformation recorded (see xform_build_sp_framework_gaap_results). 
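Reviewer note: the basis mapping below is a case-insensitive, first-match regex search over single-word values. A trimmed, runnable sketch of just that match follows; it omits the is_single_word guard and raises ValueError instead of DataMigrationError.

    import re

    _BASIS_PATTERNS = {
        r"cash": "cash_basis",
        r"contractual": "contractual_basis",
        r"regulatory": "regulatory_basis",
        r"tax": "tax_basis",
        r"other": "other_basis",
    }

    def framework_basis(basis: str) -> str:
        # First pattern that matches (case-insensitively) wins.
        for pattern, value in _BASIS_PATTERNS.items():
            if re.search(pattern, basis, re.IGNORECASE):
                return value
        raise ValueError(f"No match for framework basis: {basis!r}")

    assert framework_basis("Regulatory") == "regulatory_basis"
    assert framework_basis("TAX") == "tax_basis"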
- basis = string_to_string(basis) - if is_single_word(basis): - mappings = { - r"cash": "cash_basis", - r"contractual": "contractual_basis", - r"regulatory": "regulatory_basis", - r"tax": "tax_basis", - r"other": "other_basis", - } - # Check each pattern in the mappings with case-insensitive search - for pattern, value in mappings.items(): - if re.search(pattern, basis, re.IGNORECASE): - return value - - raise DataMigrationError( - f"Could not find a match for historic framework basis: '{basis}'", - "invalid_basis", - ) - - -def xform_census_keys_to_fac_options(census_keys, fac_options): - """Maps the census keys to FAC options. - For context, see ticket #2912. - """ - # Transformation recorded (see xform_build_sp_framework_gaap_results). - if "U" in census_keys: - fac_options.append("unmodified_opinion") - if "Q" in census_keys: - fac_options.append("qualified_opinion") - if "A" in census_keys: - fac_options.append("adverse_opinion") - if "D" in census_keys: - fac_options.append("disclaimer_of_opinion") - - -def xform_build_sp_framework_gaap_results(audit_header): - """Returns the SP Framework and GAAP results for a given audit header.""" - # Transformation recorded. - sp_framework_gaap_data = string_to_string(audit_header.TYPEREPORT_FS).upper() - if not sp_framework_gaap_data: - raise DataMigrationError( - f"GAAP details are missing for DBKEY: {audit_header.DBKEY}", - "missing_gaap", - ) - - sp_framework_gaap_results = {} - sp_framework_gaap_results["gaap_results"] = [] - xform_census_keys_to_fac_options( - sp_framework_gaap_data, sp_framework_gaap_results["gaap_results"] - ) - - if "S" in sp_framework_gaap_data: - sp_framework_gaap_results["gaap_results"].append("not_gaap") - sp_framework_gaap_results["is_sp_framework_required"] = string_to_bool( - audit_header.SP_FRAMEWORK_REQUIRED - ) - sp_framework_gaap_results["sp_framework_opinions"] = [] - sp_framework_opinions = string_to_string( - audit_header.TYPEREPORT_SP_FRAMEWORK - ).upper() - xform_census_keys_to_fac_options( - sp_framework_opinions, sp_framework_gaap_results["sp_framework_opinions"] - ) - sp_framework_gaap_results["sp_framework_basis"] = [] - basis = xform_framework_basis(audit_header.SP_FRAMEWORK) - sp_framework_gaap_results["sp_framework_basis"].append(basis) - - track_transformations(sp_framework_gaap_results, audit_header) - - return sp_framework_gaap_results - - -def track_transformations(sp_framework_gaap_results, audit_header): - """Tracks all transformations related to the special framework data.""" - - if sp_framework_gaap_results["gaap_results"]: - census_data = [ - CensusRecord( - column="TYPEREPORT_FS", - value=audit_header.TYPEREPORT_FS, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="gaap_results", - value=Util.json_array_to_str(sp_framework_gaap_results["gaap_results"]), - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": [ - "xform_build_sp_framework_gaap_results", - "xform_census_keys_to_fac_options", - ], - } - ) - - if ( - "is_sp_framework_required" in sp_framework_gaap_results - and sp_framework_gaap_results["is_sp_framework_required"] - != settings.GSA_MIGRATION - ): - census_data = [ - CensusRecord( - column="TYPEREPORT_FS", - value=audit_header.TYPEREPORT_FS, - ).to_dict(), - CensusRecord( - column="SP_FRAMEWORK_REQUIRED", - value=audit_header.SP_FRAMEWORK_REQUIRED, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="is_sp_framework_required", - value=Util.optional_bool( - 
sp_framework_gaap_results["is_sp_framework_required"] - ), - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": ["xform_build_sp_framework_gaap_results"], - } - ) - - if "sp_framework_opinions" in sp_framework_gaap_results: - census_data = [ - CensusRecord( - column="TYPEREPORT_FS", - value=audit_header.TYPEREPORT_FS, - ).to_dict(), - CensusRecord( - column="TYPEREPORT_SP_FRAMEWORK", - value=audit_header.TYPEREPORT_SP_FRAMEWORK, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="sp_framework_opinions", - value=Util.json_array_to_str( - sp_framework_gaap_results["sp_framework_opinions"] - ), - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": [ - "xform_build_sp_framework_gaap_results", - "xform_census_keys_to_fac_options", - ], - } - ) - - if "sp_framework_basis" in sp_framework_gaap_results: - census_data = [ - CensusRecord( - column="TYPEREPORT_FS", - value=audit_header.TYPEREPORT_FS, - ).to_dict(), - CensusRecord( - column="SP_FRAMEWORK", - value=audit_header.SP_FRAMEWORK, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="sp_framework_basis", - value=Util.json_array_to_str( - sp_framework_gaap_results["sp_framework_basis"] - ), - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": [ - "xform_build_sp_framework_gaap_results", - "xform_framework_basis", - ], - } - ) - - -def xform_sp_framework_required(audit_header): - if ( - "S" in string_to_string(audit_header.TYPEREPORT_FS) - and string_to_string(audit_header.SP_FRAMEWORK_REQUIRED) == "" - ): - census_data = [ - CensusRecord( - column="TYPEREPORT_FS", - value=audit_header.TYPEREPORT_FS, - ).to_dict(), - CensusRecord( - column="SP_FRAMEWORK_REQUIRED", - value=audit_header.SP_FRAMEWORK_REQUIRED, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="is_sp_framework_required", value=settings.GSA_MIGRATION - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": [ - "xform_sp_framework_required", - ], - } - ) - audit_header.SP_FRAMEWORK_REQUIRED = settings.GSA_MIGRATION - - -def xform_lowrisk(audit_header): - if string_to_string(audit_header.LOWRISK) == "": - census_data = [ - CensusRecord( - column="LOWRISK", - value=audit_header.LOWRISK, - ).to_dict(), - ] - gsa_fac_data = GsaFacRecord( - field="is_low_risk_auditee", value=settings.GSA_MIGRATION - ).to_dict() - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": [ - "xform_lowrisk", - ], - } - ) - audit_header.LOWRISK = settings.GSA_MIGRATION - - -def audit_information(audit_header): - """Generates audit information JSON.""" - if AceFlag.get_ace_report_flag(): - audit_info = ace_audit_information(audit_header) - else: - xform_sp_framework_required(audit_header) - xform_lowrisk(audit_header) - results = xform_build_sp_framework_gaap_results(audit_header) - audit_info = create_json_from_db_object(audit_header, mappings) - audit_info = { - key: results.get(key, audit_info.get(key)) - for key in set(audit_info) | set(results) - } - - agencies_prefixes = _get_agency_prefixes(audit_header.DBKEY, audit_header.AUDITYEAR) - audit_info["agencies"] = list(agencies_prefixes) - - 
audit.validators.validate_audit_information_json(audit_info) - - return audit_info - - -def ace_audit_information(audit_header): - """Constructs the audit information JSON object for an ACE report.""" - actual = create_json_from_db_object(audit_header, mappings) - default = { - "dollar_threshold": settings.GSA_MIGRATION_INT, - "gaap_results": [settings.GSA_MIGRATION], - "is_going_concern_included": settings.GSA_MIGRATION, - "is_internal_control_deficiency_disclosed": settings.GSA_MIGRATION, - "is_internal_control_material_weakness_disclosed": settings.GSA_MIGRATION, - "is_material_noncompliance_disclosed": settings.GSA_MIGRATION, - "is_aicpa_audit_guide_included": settings.GSA_MIGRATION, - "is_low_risk_auditee": settings.GSA_MIGRATION, - "agencies": [settings.GSA_MIGRATION], - } - if actual: - for key, value in actual.items(): - if value: - default[key] = value - # Tracking invalid records - invalid_records = [] - for mapping in mappings: - in_dissem = ( - mapping.in_dissem - if mapping.in_dissem != FormFieldInDissem - else mapping.in_form - ) - track_invalid_records( - [ - (mapping.in_db, getattr(audit_header, mapping.in_db)), - ], - in_dissem, - default[mapping.in_form], - invalid_records, - ) - if invalid_records: - InvalidRecord.append_invalid_migration_tag( - INVALID_MIGRATION_TAGS.ACE_AUDIT_REPORT, - ) - - return default +import re + +from django.conf import settings + +from census_historical_migration.invalid_migration_tags import INVALID_MIGRATION_TAGS +from census_historical_migration.invalid_record import InvalidRecord +from census_historical_migration.report_type_flag import AceFlag +from ..transforms.xform_string_to_int import string_to_int +from ..transforms.xform_string_to_bool import string_to_bool +from ..transforms.xform_string_to_string import string_to_string +from ..exception_utils import DataMigrationError +from ..workbooklib.excel_creation_utils import get_audits, track_invalid_records +from ..base_field_maps import FormFieldMap, FormFieldInDissem +from ..sac_general_lib.utils import ( + create_json_from_db_object, + is_single_word, +) +import audit.validators +from ..change_record import InspectionRecord, CensusRecord, GsaFacRecord +from audit.utils import Util + + +def xform_apply_default_thresholds(value): + """Applies default threshold when value is None.""" + # Transformation to be documented. 
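# Illustrative aside (standalone sketch, with a stand-in sentinel rather than
# settings.GSA_MIGRATION_INT): blank or None inputs fall back to the sentinel,
# anything else is parsed as an integer.
#
#     SENTINEL = -1  # stand-in for settings.GSA_MIGRATION_INT
#
#     def apply_default_threshold(value):
#         s = "" if value is None else str(value).strip()
#         return SENTINEL if s == "" else int(s)
#
#     apply_default_threshold(None)        # -> -1
#     apply_default_threshold(" 750000 ")  # -> 750000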
+ str_value = string_to_string(value) + if str_value == "": + return settings.GSA_MIGRATION_INT + return string_to_int(str_value) + + +mappings = [ + FormFieldMap( + "dollar_threshold", + "DOLLARTHRESHOLD", + FormFieldInDissem, + None, + xform_apply_default_thresholds, + ), + FormFieldMap( + "is_going_concern_included", "GOINGCONCERN", FormFieldInDissem, None, bool + ), + FormFieldMap( + "is_internal_control_deficiency_disclosed", + "MATERIALWEAKNESS", + FormFieldInDissem, + None, + bool, + ), + FormFieldMap( + "is_internal_control_material_weakness_disclosed", + "MATERIALWEAKNESS_MP", + FormFieldInDissem, + None, + bool, + ), + FormFieldMap( + "is_material_noncompliance_disclosed", + "MATERIALNONCOMPLIANCE", + FormFieldInDissem, + None, + bool, + ), + FormFieldMap( + "is_aicpa_audit_guide_included", + "REPORTABLECONDITION", + FormFieldInDissem, + None, + bool, + ), + FormFieldMap("is_low_risk_auditee", "LOWRISK", FormFieldInDissem, None, bool), + FormFieldMap("agencies", "PYSCHEDULE", "agencies_with_prior_findings", [], list), +] + + +def _get_agency_prefixes(dbkey, year): + """Returns the agency prefixes for a given dbkey and audit year.""" + agencies = set() + audits = get_audits(dbkey, year) + + for audit_detail in audits: + prefix = ( + string_to_string(audit_detail.CFDA_PREFIX) + if audit_detail.CFDA_PREFIX + else string_to_string(audit_detail.CFDA).split(".")[0] + ) + agencies.add(prefix) + + return agencies + + +def xform_framework_basis(basis): + """Transforms the framework basis from Census format to FAC format. + For context, see ticket #2912. + """ + # Transformation recorded (see xform_build_sp_framework_gaap_results). + basis = string_to_string(basis) + if is_single_word(basis): + mappings = { + r"cash": "cash_basis", + r"contractual": "contractual_basis", + r"regulatory": "regulatory_basis", + r"tax": "tax_basis", + r"other": "other_basis", + } + # Check each pattern in the mappings with case-insensitive search + for pattern, value in mappings.items(): + if re.search(pattern, basis, re.IGNORECASE): + return value + + raise DataMigrationError( + f"Could not find a match for historic framework basis: '{basis}'", + "invalid_basis", + ) + + +def xform_census_keys_to_fac_options(census_keys, fac_options): + """Maps the census keys to FAC options. + For context, see ticket #2912. + """ + # Transformation recorded (see xform_build_sp_framework_gaap_results). + if "U" in census_keys: + fac_options.append("unmodified_opinion") + if "Q" in census_keys: + fac_options.append("qualified_opinion") + if "A" in census_keys: + fac_options.append("adverse_opinion") + if "D" in census_keys: + fac_options.append("disclaimer_of_opinion") + + +def xform_build_sp_framework_gaap_results(audit_header): + """Returns the SP Framework and GAAP results for a given audit header.""" + # Transformation recorded. 
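# Illustrative aside: TYPEREPORT_FS is a string of single-letter Census keys,
# and xform_census_keys_to_fac_options (above) appends one FAC option per key.
# For example, under that mapping a value of "UQS" yields
#     gaap_results == ["unmodified_opinion", "qualified_opinion", "not_gaap"]
# ("U" and "Q" via the key mapping, "not_gaap" via the "S" branch below).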
+ sp_framework_gaap_data = string_to_string(audit_header.TYPEREPORT_FS).upper() + if not sp_framework_gaap_data: + raise DataMigrationError( + f"GAAP details are missing for DBKEY: {audit_header.DBKEY}", + "missing_gaap", + ) + + sp_framework_gaap_results = {} + sp_framework_gaap_results["gaap_results"] = [] + xform_census_keys_to_fac_options( + sp_framework_gaap_data, sp_framework_gaap_results["gaap_results"] + ) + + if "S" in sp_framework_gaap_data: + sp_framework_gaap_results["gaap_results"].append("not_gaap") + sp_framework_gaap_results["is_sp_framework_required"] = string_to_bool( + audit_header.SP_FRAMEWORK_REQUIRED + ) + sp_framework_gaap_results["sp_framework_opinions"] = [] + sp_framework_opinions = string_to_string( + audit_header.TYPEREPORT_SP_FRAMEWORK + ).upper() + xform_census_keys_to_fac_options( + sp_framework_opinions, sp_framework_gaap_results["sp_framework_opinions"] + ) + sp_framework_gaap_results["sp_framework_basis"] = [] + basis = xform_framework_basis(audit_header.SP_FRAMEWORK) + sp_framework_gaap_results["sp_framework_basis"].append(basis) + + track_transformations(sp_framework_gaap_results, audit_header) + + return sp_framework_gaap_results + + +def track_transformations(sp_framework_gaap_results, audit_header): + """Tracks all transformations related to the special framework data.""" + + if sp_framework_gaap_results["gaap_results"]: + census_data = [ + CensusRecord( + column="TYPEREPORT_FS", + value=audit_header.TYPEREPORT_FS, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="gaap_results", + value=Util.json_array_to_str(sp_framework_gaap_results["gaap_results"]), + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": [ + "xform_build_sp_framework_gaap_results", + "xform_census_keys_to_fac_options", + ], + } + ) + + if ( + "is_sp_framework_required" in sp_framework_gaap_results + and sp_framework_gaap_results["is_sp_framework_required"] + != settings.GSA_MIGRATION + ): + census_data = [ + CensusRecord( + column="TYPEREPORT_FS", + value=audit_header.TYPEREPORT_FS, + ).to_dict(), + CensusRecord( + column="SP_FRAMEWORK_REQUIRED", + value=audit_header.SP_FRAMEWORK_REQUIRED, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="is_sp_framework_required", + value=Util.optional_bool( + sp_framework_gaap_results["is_sp_framework_required"] + ), + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": ["xform_build_sp_framework_gaap_results"], + } + ) + + if "sp_framework_opinions" in sp_framework_gaap_results: + census_data = [ + CensusRecord( + column="TYPEREPORT_FS", + value=audit_header.TYPEREPORT_FS, + ).to_dict(), + CensusRecord( + column="TYPEREPORT_SP_FRAMEWORK", + value=audit_header.TYPEREPORT_SP_FRAMEWORK, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="sp_framework_opinions", + value=Util.json_array_to_str( + sp_framework_gaap_results["sp_framework_opinions"] + ), + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": [ + "xform_build_sp_framework_gaap_results", + "xform_census_keys_to_fac_options", + ], + } + ) + + if "sp_framework_basis" in sp_framework_gaap_results: + census_data = [ + CensusRecord( + column="TYPEREPORT_FS", + value=audit_header.TYPEREPORT_FS, + ).to_dict(), + CensusRecord( + column="SP_FRAMEWORK", + 
value=audit_header.SP_FRAMEWORK, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="sp_framework_basis", + value=Util.json_array_to_str( + sp_framework_gaap_results["sp_framework_basis"] + ), + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": [ + "xform_build_sp_framework_gaap_results", + "xform_framework_basis", + ], + } + ) + + +def xform_sp_framework_required(audit_header): + if ( + "S" in string_to_string(audit_header.TYPEREPORT_FS) + and string_to_string(audit_header.SP_FRAMEWORK_REQUIRED) == "" + ): + census_data = [ + CensusRecord( + column="TYPEREPORT_FS", + value=audit_header.TYPEREPORT_FS, + ).to_dict(), + CensusRecord( + column="SP_FRAMEWORK_REQUIRED", + value=audit_header.SP_FRAMEWORK_REQUIRED, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="is_sp_framework_required", value=settings.GSA_MIGRATION + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": [ + "xform_sp_framework_required", + ], + } + ) + audit_header.SP_FRAMEWORK_REQUIRED = settings.GSA_MIGRATION + + +def xform_lowrisk(audit_header): + if string_to_string(audit_header.LOWRISK) == "": + census_data = [ + CensusRecord( + column="LOWRISK", + value=audit_header.LOWRISK, + ).to_dict(), + ] + gsa_fac_data = GsaFacRecord( + field="is_low_risk_auditee", value=settings.GSA_MIGRATION + ).to_dict() + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": [ + "xform_lowrisk", + ], + } + ) + audit_header.LOWRISK = settings.GSA_MIGRATION + + +def audit_information(audit_header): + """Generates audit information JSON.""" + if AceFlag.get_ace_report_flag(): + audit_info = ace_audit_information(audit_header) + else: + xform_sp_framework_required(audit_header) + xform_lowrisk(audit_header) + results = xform_build_sp_framework_gaap_results(audit_header) + audit_info = create_json_from_db_object(audit_header, mappings) + audit_info = { + key: results.get(key, audit_info.get(key)) + for key in set(audit_info) | set(results) + } + + agencies_prefixes = _get_agency_prefixes(audit_header.DBKEY, audit_header.AUDITYEAR) + audit_info["agencies"] = list(agencies_prefixes) + + audit.validators.validate_audit_information_json(audit_info) + + return audit_info + + +def ace_audit_information(audit_header): + """Constructs the audit information JSON object for an ACE report.""" + actual = create_json_from_db_object(audit_header, mappings) + default = { + "dollar_threshold": settings.GSA_MIGRATION_INT, + "gaap_results": [settings.GSA_MIGRATION], + "is_going_concern_included": settings.GSA_MIGRATION, + "is_internal_control_deficiency_disclosed": settings.GSA_MIGRATION, + "is_internal_control_material_weakness_disclosed": settings.GSA_MIGRATION, + "is_material_noncompliance_disclosed": settings.GSA_MIGRATION, + "is_aicpa_audit_guide_included": settings.GSA_MIGRATION, + "is_low_risk_auditee": settings.GSA_MIGRATION, + "agencies": [settings.GSA_MIGRATION], + } + if actual: + for key, value in actual.items(): + if value: + default[key] = value + # Tracking invalid records + invalid_records = [] + for mapping in mappings: + in_dissem = ( + mapping.in_dissem + if mapping.in_dissem != FormFieldInDissem + else mapping.in_form + ) + track_invalid_records( + [ + (mapping.in_db, getattr(audit_header, mapping.in_db)), + ], + in_dissem, + default[mapping.in_form], + 
invalid_records, + ) + if invalid_records: + InvalidRecord.append_invalid_migration_tag( + INVALID_MIGRATION_TAGS.ACE_AUDIT_REPORT, + ) + + return default diff --git a/backend/census_historical_migration/sac_general_lib/auditee_certification.py b/backend/census_historical_migration/sac_general_lib/auditee_certification.py index ff47cd49d8..ff90ffceed 100644 --- a/backend/census_historical_migration/sac_general_lib/auditee_certification.py +++ b/backend/census_historical_migration/sac_general_lib/auditee_certification.py @@ -1,56 +1,56 @@ -import audit.validators -from ..base_field_maps import FormFieldMap, FormFieldInDissem -from ..sac_general_lib.utils import ( - create_json_from_db_object, -) - -from datetime import date - - -# The following fields represent checkboxes on the auditee certification form. -# Since all checkboxes must be checked (meaning all fields are set to True), -# the default value for these fields is set to `Y`. These fields are not disseminated. -# They are set to ensure that the record passes validation when saved. -auditee_certification_mappings = [ - FormFieldMap("has_no_PII", None, FormFieldInDissem, "Y", bool), - FormFieldMap("has_no_BII", None, FormFieldInDissem, "Y", bool), - FormFieldMap("meets_2CFR_specifications", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_2CFR_compliant", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_complete_and_accurate", None, FormFieldInDissem, "Y", bool), - FormFieldMap("has_engaged_auditor", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_issued_and_signed", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_FAC_releasable", None, FormFieldInDissem, "Y", bool), -] - -# auditee_certification_date_signed is not disseminated; it is set to ensure that the record passes validation when saved. -auditee_signature_mappings = [ - FormFieldMap("auditee_name", "AUDITEENAME", FormFieldInDissem, None, str), - FormFieldMap("auditee_title", "AUDITEETITLE", FormFieldInDissem, None, str), - FormFieldMap( - "auditee_certification_date_signed", None, FormFieldInDissem, None, str - ), -] - - -def _xform_set_certification_date(auditee_certification): - """Sets the default auditee certification date to today's date.""" - auditee_certification["auditee_signature"][ - "auditee_certification_date_signed" - ] = date.today().isoformat() - return auditee_certification - - -def auditee_certification(audit_header): - """Generates auditee certification JSON.""" - certification = {} - certification["auditee_certification"] = create_json_from_db_object( - audit_header, auditee_certification_mappings - ) - certification["auditee_signature"] = create_json_from_db_object( - audit_header, auditee_signature_mappings - ) - certification = _xform_set_certification_date(certification) - - audit.validators.validate_auditee_certification_json(certification) - - return certification +import audit.validators +from ..base_field_maps import FormFieldMap, FormFieldInDissem +from ..sac_general_lib.utils import ( + create_json_from_db_object, +) + +from datetime import date + + +# The following fields represent checkboxes on the auditee certification form. +# Since all checkboxes must be checked (meaning all fields are set to True), +# the default value for these fields is set to `Y`. These fields are not disseminated. +# They are set to ensure that the record passes validation when saved. 
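# Illustrative aside, not part of the patch: under the checkbox defaults
# described above, the generated payload has roughly this shape (the name,
# title, and date values here are invented for the example):
_EXAMPLE_AUDITEE_CERTIFICATION = {
    "auditee_certification": {
        "has_no_PII": True,  # every checkbox field defaults to True
        # ... remaining checkbox fields, likewise True ...
    },
    "auditee_signature": {
        "auditee_name": "EXAMPLE AUDITEE",
        "auditee_title": "CFO",
        "auditee_certification_date_signed": "2024-08-27",  # date.today().isoformat()
    },
}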
+auditee_certification_mappings = [ + FormFieldMap("has_no_PII", None, FormFieldInDissem, "Y", bool), + FormFieldMap("has_no_BII", None, FormFieldInDissem, "Y", bool), + FormFieldMap("meets_2CFR_specifications", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_2CFR_compliant", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_complete_and_accurate", None, FormFieldInDissem, "Y", bool), + FormFieldMap("has_engaged_auditor", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_issued_and_signed", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_FAC_releasable", None, FormFieldInDissem, "Y", bool), +] + +# auditee_certification_date_signed is not disseminated; it is set to ensure that the record passes validation when saved. +auditee_signature_mappings = [ + FormFieldMap("auditee_name", "AUDITEENAME", FormFieldInDissem, None, str), + FormFieldMap("auditee_title", "AUDITEETITLE", FormFieldInDissem, None, str), + FormFieldMap( + "auditee_certification_date_signed", None, FormFieldInDissem, None, str + ), +] + + +def _xform_set_certification_date(auditee_certification): + """Sets the default auditee certification date to today's date.""" + auditee_certification["auditee_signature"][ + "auditee_certification_date_signed" + ] = date.today().isoformat() + return auditee_certification + + +def auditee_certification(audit_header): + """Generates auditee certification JSON.""" + certification = {} + certification["auditee_certification"] = create_json_from_db_object( + audit_header, auditee_certification_mappings + ) + certification["auditee_signature"] = create_json_from_db_object( + audit_header, auditee_signature_mappings + ) + certification = _xform_set_certification_date(certification) + + audit.validators.validate_auditee_certification_json(certification) + + return certification diff --git a/backend/census_historical_migration/sac_general_lib/auditor_certification.py b/backend/census_historical_migration/sac_general_lib/auditor_certification.py index ff71b53a3e..513df15dbc 100644 --- a/backend/census_historical_migration/sac_general_lib/auditor_certification.py +++ b/backend/census_historical_migration/sac_general_lib/auditor_certification.py @@ -1,54 +1,54 @@ -import audit.validators -from ..base_field_maps import FormFieldMap, FormFieldInDissem -from ..sac_general_lib.utils import ( - create_json_from_db_object, -) - -from datetime import date - - -# The following fields represent checkboxes on the auditor certification form. -# Since all checkboxes must be checked (meaning all fields are set to True), -# the default value for these fields is set to `Y`. These fields are not disseminated. -# They are set to ensure that the record passes validation when saved -auditor_certification_mappings = [ - FormFieldMap("is_OMB_limited", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_auditee_responsible", None, FormFieldInDissem, "Y", bool), - FormFieldMap("has_used_auditors_report", None, FormFieldInDissem, "Y", bool), - FormFieldMap("has_no_auditee_procedures", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_accurate_and_complete", None, FormFieldInDissem, "Y", bool), - FormFieldMap("is_FAC_releasable", None, FormFieldInDissem, "Y", bool), -] - -# auditor_certification_date_signed is not disseminated; it is set to ensure that the record passes validation when saved. 
-auditor_signature_mappings = [ - FormFieldMap("auditor_name", "CPACONTACT", FormFieldInDissem, None, str), - FormFieldMap("auditor_title", "CPATITLE", FormFieldInDissem, None, str), - FormFieldMap( - "auditor_certification_date_signed", None, FormFieldInDissem, None, str - ), -] - - -def _xform_set_certification_date(auditor_certification): - """Sets the default auditor certification date to current date.""" - auditor_certification["auditor_signature"][ - "auditor_certification_date_signed" - ] = date.today().isoformat() - return auditor_certification - - -def auditor_certification(audit_header): - """Generates auditor certification JSON.""" - certification = {} - certification["auditor_certification"] = create_json_from_db_object( - audit_header, auditor_certification_mappings - ) - certification["auditor_signature"] = create_json_from_db_object( - audit_header, auditor_signature_mappings - ) - certification = _xform_set_certification_date(certification) - - audit.validators.validate_auditor_certification_json(certification) - - return certification +import audit.validators +from ..base_field_maps import FormFieldMap, FormFieldInDissem +from ..sac_general_lib.utils import ( + create_json_from_db_object, +) + +from datetime import date + + +# The following fields represent checkboxes on the auditor certification form. +# Since all checkboxes must be checked (meaning all fields are set to True), +# the default value for these fields is set to `Y`. These fields are not disseminated. +# They are set to ensure that the record passes validation when saved +auditor_certification_mappings = [ + FormFieldMap("is_OMB_limited", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_auditee_responsible", None, FormFieldInDissem, "Y", bool), + FormFieldMap("has_used_auditors_report", None, FormFieldInDissem, "Y", bool), + FormFieldMap("has_no_auditee_procedures", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_accurate_and_complete", None, FormFieldInDissem, "Y", bool), + FormFieldMap("is_FAC_releasable", None, FormFieldInDissem, "Y", bool), +] + +# auditor_certification_date_signed is not disseminated; it is set to ensure that the record passes validation when saved. 
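# Illustrative aside, not part of the patch: the signature date below is
# stamped at build time with date.today().isoformat(), the same idiom the
# auditee side uses. A standalone sketch (the dict and key are examples):
from datetime import date


def _example_stamp_signature(signature: dict) -> dict:
    # Mutates and returns the dict, mirroring _xform_set_certification_date.
    signature["auditor_certification_date_signed"] = date.today().isoformat()
    return signature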
+auditor_signature_mappings = [ + FormFieldMap("auditor_name", "CPACONTACT", FormFieldInDissem, None, str), + FormFieldMap("auditor_title", "CPATITLE", FormFieldInDissem, None, str), + FormFieldMap( + "auditor_certification_date_signed", None, FormFieldInDissem, None, str + ), +] + + +def _xform_set_certification_date(auditor_certification): + """Sets the default auditor certification date to current date.""" + auditor_certification["auditor_signature"][ + "auditor_certification_date_signed" + ] = date.today().isoformat() + return auditor_certification + + +def auditor_certification(audit_header): + """Generates auditor certification JSON.""" + certification = {} + certification["auditor_certification"] = create_json_from_db_object( + audit_header, auditor_certification_mappings + ) + certification["auditor_signature"] = create_json_from_db_object( + audit_header, auditor_signature_mappings + ) + certification = _xform_set_certification_date(certification) + + audit.validators.validate_auditor_certification_json(certification) + + return certification diff --git a/backend/census_historical_migration/sac_general_lib/cognizant_oversight.py b/backend/census_historical_migration/sac_general_lib/cognizant_oversight.py index 48f3c60d63..1ace229132 100644 --- a/backend/census_historical_migration/sac_general_lib/cognizant_oversight.py +++ b/backend/census_historical_migration/sac_general_lib/cognizant_oversight.py @@ -1,15 +1,15 @@ -from ..exception_utils import DataMigrationError -from ..transforms.xform_string_to_string import string_to_string - - -def cognizant_oversight(audit_header): - """Retrieve cognizant oversight information for a given audit header.""" - - cognizant = string_to_string(audit_header.COGAGENCY) - oversight = string_to_string(audit_header.OVERSIGHTAGENCY) - if cognizant and len(cognizant) > 2: - raise DataMigrationError( - f"Invalid cognizant agency: {cognizant}", "invalid_cognizant" - ) - - return cognizant, oversight +from ..exception_utils import DataMigrationError +from ..transforms.xform_string_to_string import string_to_string + + +def cognizant_oversight(audit_header): + """Retrieve cognizant oversight information for a given audit header.""" + + cognizant = string_to_string(audit_header.COGAGENCY) + oversight = string_to_string(audit_header.OVERSIGHTAGENCY) + if cognizant and len(cognizant) > 2: + raise DataMigrationError( + f"Invalid cognizant agency: {cognizant}", "invalid_cognizant" + ) + + return cognizant, oversight diff --git a/backend/census_historical_migration/sac_general_lib/general_information.py b/backend/census_historical_migration/sac_general_lib/general_information.py index ca15b88534..8a972bfa30 100644 --- a/backend/census_historical_migration/sac_general_lib/general_information.py +++ b/backend/census_historical_migration/sac_general_lib/general_information.py @@ -1,590 +1,590 @@ -import json -import re -from datetime import timedelta - -from django.conf import settings - -import audit.validators -from ..workbooklib.additional_ueis import get_ueis -from ..workbooklib.additional_eins import get_eins -from ..transforms.xform_retrieve_uei import xform_retrieve_uei -from ..transforms.xform_remove_hyphen_and_pad_zip import xform_remove_hyphen_and_pad_zip -from ..transforms.xform_string_to_string import string_to_string -from ..exception_utils import DataMigrationError -from ..sac_general_lib.utils import ( - xform_census_date_to_datetime, -) -from ..base_field_maps import FormFieldMap, FormFieldInDissem -from ..sac_general_lib.utils import ( - 
create_json_from_db_object, -) -from jsonschema import validate -from jsonschema.exceptions import ValidationError -from ..change_record import InspectionRecord, CensusRecord, GsaFacRecord -from ..report_type_flag import AceFlag - -PERIOD_DICT = {"A": "annual", "B": "biennial", "O": "other"} -AUDIT_TYPE_DICT = { - "S": "single-audit", - "P": "program-specific", - "A": "alternative-compliance-engagement", -} - - -def xform_entity_type(phrase): - """Transforms the entity type from Census format to FAC format. - For context, see ticket #2912. - """ - # Transformation recorded. - mappings = { - r"institution\s+of\s+higher\s+education": "higher-ed", - r"non-?profit": "non-profit", - r"local\s+government": "local", - r"state": "state", - r"unknown": "unknown", - r"trib(e|al)": "tribal", - } - new_phrase = string_to_string(phrase) - - # Check each pattern in the mappings with case-insensitive search - for pattern, value in mappings.items(): - if re.search(pattern, new_phrase, re.IGNORECASE): - track_transformations( - "ENTITY_TYPE", - phrase, - "entity_type", - value, - "xform_entity_type", - ) - return value - raise DataMigrationError( - f"Could not find a match for historic entity type '{phrase}'", - "invalid_historic_entity_type", - ) - - -mappings = [ - FormFieldMap( - "auditee_fiscal_period_start", "FYENDDATE", "fy_start_date", None, str - ), - FormFieldMap("auditee_fiscal_period_end", "FYENDDATE", "fy_end_date", None, str), - FormFieldMap("audit_period_covered", "PERIODCOVERED", FormFieldInDissem, None, str), - FormFieldMap("audit_type", "AUDITTYPE", FormFieldInDissem, None, str), - FormFieldMap("auditee_address_line_1", "STREET1", FormFieldInDissem, None, str), - FormFieldMap("auditee_city", "CITY", FormFieldInDissem, None, str), - FormFieldMap( - "auditee_contact_name", "AUDITEECONTACT", FormFieldInDissem, None, str - ), - FormFieldMap("auditee_contact_title", "AUDITEETITLE", FormFieldInDissem, None, str), - FormFieldMap("auditee_email", "AUDITEEEMAIL", FormFieldInDissem, None, str), - FormFieldMap("auditee_name", "AUDITEENAME", FormFieldInDissem, None, str), - FormFieldMap("auditee_phone", "AUDITEEPHONE", FormFieldInDissem, None, str), - FormFieldMap("auditee_state", "STATE", FormFieldInDissem, None, str), - FormFieldMap( - "auditee_uei", - "UEI", - FormFieldInDissem, - None, - xform_retrieve_uei, - ), - FormFieldMap( - "auditee_zip", - "ZIPCODE", - FormFieldInDissem, - None, - str, - ), - FormFieldMap("auditor_address_line_1", "CPASTREET1", FormFieldInDissem, None, str), - FormFieldMap("auditor_city", "CPACITY", FormFieldInDissem, None, str), - FormFieldMap("auditor_contact_name", "CPACONTACT", FormFieldInDissem, None, str), - FormFieldMap("auditor_contact_title", "CPATITLE", FormFieldInDissem, None, str), - FormFieldMap("auditor_country", "CPACOUNTRY", FormFieldInDissem, None, str), - FormFieldMap("auditor_ein", "AUDITOR_EIN", FormFieldInDissem, None, str), - FormFieldMap( - "auditor_ein_not_an_ssn_attestation", None, None, "Y", bool - ), # Not in DB, not disseminated, needed for validation - FormFieldMap("auditor_email", "CPAEMAIL", FormFieldInDissem, None, str), - FormFieldMap("auditor_firm_name", "CPAFIRMNAME", FormFieldInDissem, None, str), - FormFieldMap("auditor_phone", "CPAPHONE", FormFieldInDissem, None, str), - FormFieldMap("auditor_state", "CPASTATE", FormFieldInDissem, None, str), - FormFieldMap( - "auditor_zip", - "CPAZIPCODE", - FormFieldInDissem, - None, - str, - ), - FormFieldMap("ein", "EIN", "auditee_ein", None, str), - FormFieldMap( - "ein_not_an_ssn_attestation", 
None, None, "Y", bool - ), # Not in DB, not disseminated, needed for validation - FormFieldMap( - "is_usa_based", None, None, "Y", bool - ), # Not in DB, not disseminated, needed for validation - FormFieldMap( - "met_spending_threshold", None, None, "Y", bool - ), # Not in DB, not disseminated, needed for validation - FormFieldMap( - "multiple_eins_covered", "MULTIPLEEINS", None, None, bool - ), # In DB, not disseminated, needed for validation - FormFieldMap( - "multiple_ueis_covered", "MULTIPLEUEIS", "is_additional_ueis", None, bool - ), - FormFieldMap( - "user_provided_organization_type", - "ENTITY_TYPE", - "entity_type", - None, - xform_entity_type, - ), - FormFieldMap( - "secondary_auditors_exist", "MULTIPLE_CPAS", None, None, bool - ), # In DB, not disseminated, needed for validation -] - - -def is_uei_valid(uei): - try: - with open(f"{settings.OUTPUT_BASE_DIR}/UeiSchema.json") as schema: - schema_json = json.load(schema) - uei_schema = schema_json.get("properties")["uei"] - validate(instance=uei, schema=uei_schema) - return True - except FileNotFoundError: - raise DataMigrationError( - f"UeiSchema.json file not found in {settings.OUTPUT_BASE_DIR}", - "missing_uei_schema_json", - ) - except json.decoder.JSONDecodeError: - raise DataMigrationError( - "UeiSchema.json file contains invalid JSON.", "invalid_uei_schema_json" - ) - - except ValidationError: - return False - - except Exception as e: - raise DataMigrationError( - f"Error validating Auditee UEI: {e}", "cannot_valid_auditee_uei" - ) - - -def xform_update_multiple_eins_flag(audit_header): - """Updates the multiple_eins_covered flag. - This updates does not propagate to the database, it only updates the object. - """ - if not string_to_string(audit_header.MULTIPLEEINS): - eins = get_eins(audit_header.DBKEY, audit_header.AUDITYEAR) - audit_header.MULTIPLEEINS = "Y" if eins else "N" - - -def xform_update_multiple_ueis_flag(audit_header): - """Updates the multiple_ueis_covered flag. - This updates does not propagate to the database, it only updates the object. - """ - if not string_to_string(audit_header.MULTIPLEUEIS): - ueis = get_ueis(audit_header.DBKEY, audit_header.AUDITYEAR) - audit_header.MULTIPLEUEIS = "Y" if ueis else "N" - - -def xform_update_entity_type(audit_header): - """Updates ENTITY_TYPE. - This updates does not propagate to the database, it only updates the object. - """ - if string_to_string(audit_header.ENTITY_TYPE) == "": - audit_header.ENTITY_TYPE = ( - "tribal" - if string_to_string(audit_header.SUPPRESSION_CODE).upper() == "IT" - else "unknown" - ) - track_transformations( - "ENTITY_TYPE", - "", - "entity_type", - audit_header.ENTITY_TYPE, - "xform_update_entity_type", - ) - - -def _period_covered(s): - """Helper to transform the period covered from Census format to FAC format.""" - if s not in PERIOD_DICT: - raise DataMigrationError( - f"Key '{s}' not found in period coverage mapping", - "invalid_period_coverage_key", - ) - return PERIOD_DICT[s] - - -def _census_audit_type(s): - """Helper to transform the audit type from Census format to FAC format.""" - - if s not in AUDIT_TYPE_DICT: - raise DataMigrationError( - f"Key '{s}' not found in census audit type mapping", - "invalid_census_audit_type_key", - ) - return AUDIT_TYPE_DICT[s] - - -def xform_country(general_information, audit_header): - """Transforms the country from Census format to FAC format. This method is now deprecated.""" - # Transformation to be documented. 
- auditor_country = string_to_string( - general_information.get("auditor_country") - ).upper() - if auditor_country in ["US", "USA"]: - general_information["auditor_country"] = "USA" - elif auditor_country == "": - valid_file = open(f"{settings.SCHEMA_BASE_DIR}/States.json") - valid_json = json.load(valid_file) - auditor_state = string_to_string(audit_header.CPASTATE).upper() - if auditor_state in valid_json["UnitedStatesStateAbbr"]: - general_information["auditor_country"] = "USA" - else: - raise DataMigrationError( - f"Unable to determine auditor country. Invalid state: {auditor_state}", - "invalid_state", - ) - else: - raise DataMigrationError( - f"Unable to determine auditor country. Unknown code: {auditor_country}", - "invalid_country", - ) - - return general_information - - -def xform_country_v2(general_information, audit_header): - """Transforms the country from Census format to FAC format.""" - auditor_country = string_to_string( - general_information.get("auditor_country") - ).upper() - if auditor_country in ["US", "USA"]: - general_information["auditor_country"] = "USA" - elif auditor_country == "NON-US" and not ( - audit_header.CPAZIPCODE - or audit_header.CPASTATE - or audit_header.CPACITY - or audit_header.CPASTREET1 - ): - general_information["auditor_country"] = "non-USA" - general_information["auditor_international_address"] = audit_header.CPAFOREIGN - elif auditor_country == "": - valid_file = open(f"{settings.SCHEMA_BASE_DIR}/States.json") - valid_json = json.load(valid_file) - auditor_state = string_to_string(audit_header.CPASTATE).upper() - if auditor_state in valid_json["UnitedStatesStateAbbr"]: - general_information["auditor_country"] = "USA" - else: - raise DataMigrationError( - f"Unable to determine auditor country. Invalid state: {auditor_state}", - "invalid_state", - ) - else: - raise DataMigrationError( - f"Unable to determine auditor country. Unknown code: {auditor_country}", - "invalid_country", - ) - - return general_information - - -def xform_auditee_fiscal_period_end(general_information): - """Transforms the fiscal period end from Census format to FAC format.""" - # Transformation to be documented. - if general_information.get("auditee_fiscal_period_end"): - general_information["auditee_fiscal_period_end"] = ( - xform_census_date_to_datetime( - general_information.get("auditee_fiscal_period_end") - ).strftime("%Y-%m-%d") - ) - else: - raise DataMigrationError( - "Auditee fiscal period end is empty.", - "invalid_auditee_fiscal_period_end", - ) - - return general_information - - -def xform_auditee_fiscal_period_start(general_information): - """Constructs the fiscal period start from the fiscal period end""" - # Transformation to be documented. - fiscal_start_date = xform_census_date_to_datetime( - general_information.get("auditee_fiscal_period_end") - ) - timedelta(days=365) - general_information["auditee_fiscal_period_start"] = fiscal_start_date.strftime( - "%Y-%m-%d" - ) - - return general_information - - -def xform_audit_period_covered(general_information): - """Transforms the period covered from Census format to FAC format.""" - # Transformation recorded. 
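# Illustrative aside: PERIOD_DICT (top of file) drives this transform:
#     "A" -> "annual", "B" -> "biennial", "O" -> "other"
# and _period_covered raises a DataMigrationError tagged
# "invalid_period_coverage_key" for any other key.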
- if general_information.get("audit_period_covered"): - value_in_db = general_information["audit_period_covered"] - general_information["audit_period_covered"] = _period_covered( - value_in_db.upper() - ) - track_transformations( - "PERIODCOVERED", - value_in_db, - "audit_period_covered", - general_information["audit_period_covered"], - "xform_audit_period_covered", - ) - else: - raise DataMigrationError( - f"Audit period covered is empty: {general_information.get('audit_period_covered')}", - "invalid_audit_period_covered", - ) - return general_information - - -def xform_audit_type(general_information): - """Transforms the audit type from Census format to FAC format.""" - # Transformation recorded. - if general_information.get("audit_type"): - value_in_db = general_information["audit_type"] - audit_type = _census_audit_type(value_in_db.upper()) - if audit_type == AUDIT_TYPE_DICT["A"]: - audit_type = settings.GSA_MIGRATION - AceFlag.set_ace_report_flag(True) - general_information["audit_type"] = audit_type - track_transformations( - "AUDITTYPE", - value_in_db, - "audit_type", - general_information["audit_type"], - "xform_audit_type", - ) - else: - raise DataMigrationError( - f"Audit type is empty: {general_information.get('audit_type')}", - "invalid_audit_type", - ) - return general_information - - -def xform_replace_empty_auditor_email(general_information): - """Replaces empty auditor email with GSA Migration keyword""" - # Transformation recorded. - if not general_information.get("auditor_email"): - general_information["auditor_email"] = settings.GSA_MIGRATION - track_transformations( - "CPAEMAIL", - "", - "auditor_email", - general_information["auditor_email"], - "xform_replace_empty_auditor_email", - ) - return general_information - - -def xform_replace_empty_auditee_email(general_information): - """Replaces empty auditee email with GSA Migration keyword""" - # Transformation recorded. - if not general_information.get("auditee_email"): - general_information["auditee_email"] = settings.GSA_MIGRATION - track_transformations( - "AUDITEEEMAIL", - "", - "auditee_email", - general_information["auditee_email"], - "xform_replace_empty_auditee_email", - ) - return general_information - - -def xform_replace_empty_auditee_contact_name(general_information): - """Replaces empty auditee contact name with GSA Migration keyword""" - # Transformation recorded. - if not general_information.get("auditee_contact_name"): - general_information["auditee_contact_name"] = settings.GSA_MIGRATION - track_transformations( - "AUDITEECONTACT", - "", - "auditee_contact_name", - general_information["auditee_contact_name"], - "xform_replace_empty_auditee_contact_name", - ) - return general_information - - -def xform_replace_empty_auditee_contact_title(general_information): - """Replaces empty auditee contact title with GSA Migration keyword""" - # Transformation recorded. - if not general_information.get("auditee_contact_title"): - general_information["auditee_contact_title"] = settings.GSA_MIGRATION - track_transformations( - "AUDITEETITLE", - "", - "auditee_contact_title", - general_information["auditee_contact_title"], - "xform_replace_empty_auditee_contact_title", - ) - return general_information - - -def xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration(audit_header): - """Replaces empty or invalid auditee UEI with GSA Migration keyword""" - # Transformation recorded. 
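# Illustrative aside: is_uei_valid (defined above) checks the UEI against the
# "uei" property of UeiSchema.json. The substitution below keeps a present,
# schema-valid UEI as-is; a missing or invalid one is replaced with
# settings.GSA_MIGRATION, and the change is logged via track_transformations.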
- if not (audit_header.UEI and is_uei_valid(audit_header.UEI)): - track_transformations( - "UEI", - audit_header.UEI, - "auditee_uei", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration", - ) - audit_header.UEI = settings.GSA_MIGRATION - - return audit_header - - -def track_transformations( - census_column, census_value, gsa_field, gsa_value, transformation_functions -): - """Tracks all transformations related to the general information data.""" - census_data = [CensusRecord(column=census_column, value=census_value).to_dict()] - gsa_fac_data = GsaFacRecord(field=gsa_field, value=gsa_value).to_dict() - function_names = transformation_functions.split(",") - InspectionRecord.append_general_changes( - { - "census_data": census_data, - "gsa_fac_data": gsa_fac_data, - "transformation_functions": function_names, - } - ) - - -def xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(general_information): - """Replaces empty or invalid auditor EIN with GSA Migration keyword""" - # Transformation recorded. - if not ( - general_information.get("auditor_ein") - and re.match( - settings.EMPLOYER_IDENTIFICATION_NUMBER, - general_information.get("auditor_ein"), - ) - ): - track_transformations( - "AUDITOR_EIN", - general_information.get("auditor_ein"), - "auditor_ein", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration", - ) - general_information["auditor_ein"] = settings.GSA_MIGRATION - - return general_information - - -def xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(general_information): - """Replaces empty or invalid auditee EIN with GSA Migration keyword""" - # Transformation recorded. - if not ( - general_information.get("ein") - and re.match( - settings.EMPLOYER_IDENTIFICATION_NUMBER, general_information.get("ein") - ) - ): - track_transformations( - "EIN", - general_information.get("ein"), - "auditee_ein", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration", - ) - general_information["ein"] = settings.GSA_MIGRATION - - return general_information - - -def xform_audit_period_other_months(general_information, audit_header): - """ - This method addresses cases where the reporting period spans a non-standard duration, ensuring that - the total number of months covered is accurately accounted for. This is applicable in scenarios - where the covered period could span any number of months, rather than fixed annual or biennial periods. 
- """ - if string_to_string(audit_header.PERIODCOVERED) == "O": - general_information["audit_period_other_months"] = str( - audit_header.NUMBERMONTHS - ).zfill(2) - - -def xform_replace_empty_zips(general_information): - """Replaces empty auditor and auditee zipcodes with GSA Migration keyword""" - auditor_zip = general_information.get("auditor_zip") - is_usa_auditor = general_information.get("auditor_country") != "non-USA" - - if is_usa_auditor: - if not auditor_zip: - new_auditor_zip = settings.GSA_MIGRATION - else: - new_auditor_zip = xform_remove_hyphen_and_pad_zip(auditor_zip) - - if new_auditor_zip != auditor_zip: - track_transformations( - "CPAZIPCODE", - auditor_zip, - "auditor_zip", - new_auditor_zip, - "xform_replace_empty_zips", - ) - general_information["auditor_zip"] = new_auditor_zip - - auditee_zip = general_information.get("auditee_zip") - if not auditee_zip: - new_auditee_zip = settings.GSA_MIGRATION - else: - new_auditee_zip = xform_remove_hyphen_and_pad_zip(auditee_zip) - - if new_auditee_zip != auditee_zip: - track_transformations( - "ZIPCODE", - auditee_zip, - "auditee_zip", - new_auditee_zip, - "xform_replace_empty_zips", - ) - general_information["auditee_zip"] = new_auditee_zip - - return general_information - - -def general_information(audit_header): - """Generates general information JSON.""" - xform_update_multiple_eins_flag(audit_header) - xform_update_multiple_ueis_flag(audit_header) - xform_update_entity_type(audit_header) - xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration(audit_header) - general_information = create_json_from_db_object(audit_header, mappings) - xform_audit_period_other_months(general_information, audit_header) - transformations = [ - xform_auditee_fiscal_period_start, - xform_auditee_fiscal_period_end, - xform_country_v2, - xform_audit_period_covered, - xform_audit_type, - xform_replace_empty_auditor_email, - xform_replace_empty_auditee_email, - xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration, - xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration, - xform_replace_empty_zips, - xform_replace_empty_auditee_contact_name, - xform_replace_empty_auditee_contact_title, - ] - - for transform in transformations: - if transform == xform_country_v2: - general_information = transform(general_information, audit_header) - else: - general_information = transform(general_information) - - audit.validators.validate_general_information_complete_json(general_information) - - return general_information +import json +import re +from datetime import timedelta + +from django.conf import settings + +import audit.validators +from ..workbooklib.additional_ueis import get_ueis +from ..workbooklib.additional_eins import get_eins +from ..transforms.xform_retrieve_uei import xform_retrieve_uei +from ..transforms.xform_remove_hyphen_and_pad_zip import xform_remove_hyphen_and_pad_zip +from ..transforms.xform_string_to_string import string_to_string +from ..exception_utils import DataMigrationError +from ..sac_general_lib.utils import ( + xform_census_date_to_datetime, +) +from ..base_field_maps import FormFieldMap, FormFieldInDissem +from ..sac_general_lib.utils import ( + create_json_from_db_object, +) +from jsonschema import validate +from jsonschema.exceptions import ValidationError +from ..change_record import InspectionRecord, CensusRecord, GsaFacRecord +from ..report_type_flag import AceFlag + +PERIOD_DICT = {"A": "annual", "B": "biennial", "O": "other"} +AUDIT_TYPE_DICT = { + "S": "single-audit", + "P": "program-specific", + "A": 
"alternative-compliance-engagement", +} + + +def xform_entity_type(phrase): + """Transforms the entity type from Census format to FAC format. + For context, see ticket #2912. + """ + # Transformation recorded. + mappings = { + r"institution\s+of\s+higher\s+education": "higher-ed", + r"non-?profit": "non-profit", + r"local\s+government": "local", + r"state": "state", + r"unknown": "unknown", + r"trib(e|al)": "tribal", + } + new_phrase = string_to_string(phrase) + + # Check each pattern in the mappings with case-insensitive search + for pattern, value in mappings.items(): + if re.search(pattern, new_phrase, re.IGNORECASE): + track_transformations( + "ENTITY_TYPE", + phrase, + "entity_type", + value, + "xform_entity_type", + ) + return value + raise DataMigrationError( + f"Could not find a match for historic entity type '{phrase}'", + "invalid_historic_entity_type", + ) + + +mappings = [ + FormFieldMap( + "auditee_fiscal_period_start", "FYENDDATE", "fy_start_date", None, str + ), + FormFieldMap("auditee_fiscal_period_end", "FYENDDATE", "fy_end_date", None, str), + FormFieldMap("audit_period_covered", "PERIODCOVERED", FormFieldInDissem, None, str), + FormFieldMap("audit_type", "AUDITTYPE", FormFieldInDissem, None, str), + FormFieldMap("auditee_address_line_1", "STREET1", FormFieldInDissem, None, str), + FormFieldMap("auditee_city", "CITY", FormFieldInDissem, None, str), + FormFieldMap( + "auditee_contact_name", "AUDITEECONTACT", FormFieldInDissem, None, str + ), + FormFieldMap("auditee_contact_title", "AUDITEETITLE", FormFieldInDissem, None, str), + FormFieldMap("auditee_email", "AUDITEEEMAIL", FormFieldInDissem, None, str), + FormFieldMap("auditee_name", "AUDITEENAME", FormFieldInDissem, None, str), + FormFieldMap("auditee_phone", "AUDITEEPHONE", FormFieldInDissem, None, str), + FormFieldMap("auditee_state", "STATE", FormFieldInDissem, None, str), + FormFieldMap( + "auditee_uei", + "UEI", + FormFieldInDissem, + None, + xform_retrieve_uei, + ), + FormFieldMap( + "auditee_zip", + "ZIPCODE", + FormFieldInDissem, + None, + str, + ), + FormFieldMap("auditor_address_line_1", "CPASTREET1", FormFieldInDissem, None, str), + FormFieldMap("auditor_city", "CPACITY", FormFieldInDissem, None, str), + FormFieldMap("auditor_contact_name", "CPACONTACT", FormFieldInDissem, None, str), + FormFieldMap("auditor_contact_title", "CPATITLE", FormFieldInDissem, None, str), + FormFieldMap("auditor_country", "CPACOUNTRY", FormFieldInDissem, None, str), + FormFieldMap("auditor_ein", "AUDITOR_EIN", FormFieldInDissem, None, str), + FormFieldMap( + "auditor_ein_not_an_ssn_attestation", None, None, "Y", bool + ), # Not in DB, not disseminated, needed for validation + FormFieldMap("auditor_email", "CPAEMAIL", FormFieldInDissem, None, str), + FormFieldMap("auditor_firm_name", "CPAFIRMNAME", FormFieldInDissem, None, str), + FormFieldMap("auditor_phone", "CPAPHONE", FormFieldInDissem, None, str), + FormFieldMap("auditor_state", "CPASTATE", FormFieldInDissem, None, str), + FormFieldMap( + "auditor_zip", + "CPAZIPCODE", + FormFieldInDissem, + None, + str, + ), + FormFieldMap("ein", "EIN", "auditee_ein", None, str), + FormFieldMap( + "ein_not_an_ssn_attestation", None, None, "Y", bool + ), # Not in DB, not disseminated, needed for validation + FormFieldMap( + "is_usa_based", None, None, "Y", bool + ), # Not in DB, not disseminated, needed for validation + FormFieldMap( + "met_spending_threshold", None, None, "Y", bool + ), # Not in DB, not disseminated, needed for validation + FormFieldMap( + "multiple_eins_covered", 
"MULTIPLEEINS", None, None, bool + ), # In DB, not disseminated, needed for validation + FormFieldMap( + "multiple_ueis_covered", "MULTIPLEUEIS", "is_additional_ueis", None, bool + ), + FormFieldMap( + "user_provided_organization_type", + "ENTITY_TYPE", + "entity_type", + None, + xform_entity_type, + ), + FormFieldMap( + "secondary_auditors_exist", "MULTIPLE_CPAS", None, None, bool + ), # In DB, not disseminated, needed for validation +] + + +def is_uei_valid(uei): + try: + with open(f"{settings.OUTPUT_BASE_DIR}/UeiSchema.json") as schema: + schema_json = json.load(schema) + uei_schema = schema_json.get("properties")["uei"] + validate(instance=uei, schema=uei_schema) + return True + except FileNotFoundError: + raise DataMigrationError( + f"UeiSchema.json file not found in {settings.OUTPUT_BASE_DIR}", + "missing_uei_schema_json", + ) + except json.decoder.JSONDecodeError: + raise DataMigrationError( + "UeiSchema.json file contains invalid JSON.", "invalid_uei_schema_json" + ) + + except ValidationError: + return False + + except Exception as e: + raise DataMigrationError( + f"Error validating Auditee UEI: {e}", "cannot_valid_auditee_uei" + ) + + +def xform_update_multiple_eins_flag(audit_header): + """Updates the multiple_eins_covered flag. + This updates does not propagate to the database, it only updates the object. + """ + if not string_to_string(audit_header.MULTIPLEEINS): + eins = get_eins(audit_header.DBKEY, audit_header.AUDITYEAR) + audit_header.MULTIPLEEINS = "Y" if eins else "N" + + +def xform_update_multiple_ueis_flag(audit_header): + """Updates the multiple_ueis_covered flag. + This updates does not propagate to the database, it only updates the object. + """ + if not string_to_string(audit_header.MULTIPLEUEIS): + ueis = get_ueis(audit_header.DBKEY, audit_header.AUDITYEAR) + audit_header.MULTIPLEUEIS = "Y" if ueis else "N" + + +def xform_update_entity_type(audit_header): + """Updates ENTITY_TYPE. + This updates does not propagate to the database, it only updates the object. + """ + if string_to_string(audit_header.ENTITY_TYPE) == "": + audit_header.ENTITY_TYPE = ( + "tribal" + if string_to_string(audit_header.SUPPRESSION_CODE).upper() == "IT" + else "unknown" + ) + track_transformations( + "ENTITY_TYPE", + "", + "entity_type", + audit_header.ENTITY_TYPE, + "xform_update_entity_type", + ) + + +def _period_covered(s): + """Helper to transform the period covered from Census format to FAC format.""" + if s not in PERIOD_DICT: + raise DataMigrationError( + f"Key '{s}' not found in period coverage mapping", + "invalid_period_coverage_key", + ) + return PERIOD_DICT[s] + + +def _census_audit_type(s): + """Helper to transform the audit type from Census format to FAC format.""" + + if s not in AUDIT_TYPE_DICT: + raise DataMigrationError( + f"Key '{s}' not found in census audit type mapping", + "invalid_census_audit_type_key", + ) + return AUDIT_TYPE_DICT[s] + + +def xform_country(general_information, audit_header): + """Transforms the country from Census format to FAC format. This method is now deprecated.""" + # Transformation to be documented. 
+    auditor_country = string_to_string(
+        general_information.get("auditor_country")
+    ).upper()
+    if auditor_country in ["US", "USA"]:
+        general_information["auditor_country"] = "USA"
+    elif auditor_country == "":
+        with open(f"{settings.SCHEMA_BASE_DIR}/States.json") as valid_file:
+            valid_json = json.load(valid_file)
+        auditor_state = string_to_string(audit_header.CPASTATE).upper()
+        if auditor_state in valid_json["UnitedStatesStateAbbr"]:
+            general_information["auditor_country"] = "USA"
+        else:
+            raise DataMigrationError(
+                f"Unable to determine auditor country. Invalid state: {auditor_state}",
+                "invalid_state",
+            )
+    else:
+        raise DataMigrationError(
+            f"Unable to determine auditor country. Unknown code: {auditor_country}",
+            "invalid_country",
+        )
+
+    return general_information
+
+
+def xform_country_v2(general_information, audit_header):
+    """Transforms the country from Census format to FAC format."""
+    auditor_country = string_to_string(
+        general_information.get("auditor_country")
+    ).upper()
+    if auditor_country in ["US", "USA"]:
+        general_information["auditor_country"] = "USA"
+    elif auditor_country == "NON-US" and not (
+        audit_header.CPAZIPCODE
+        or audit_header.CPASTATE
+        or audit_header.CPACITY
+        or audit_header.CPASTREET1
+    ):
+        general_information["auditor_country"] = "non-USA"
+        general_information["auditor_international_address"] = audit_header.CPAFOREIGN
+    elif auditor_country == "":
+        with open(f"{settings.SCHEMA_BASE_DIR}/States.json") as valid_file:
+            valid_json = json.load(valid_file)
+        auditor_state = string_to_string(audit_header.CPASTATE).upper()
+        if auditor_state in valid_json["UnitedStatesStateAbbr"]:
+            general_information["auditor_country"] = "USA"
+        else:
+            raise DataMigrationError(
+                f"Unable to determine auditor country. Invalid state: {auditor_state}",
+                "invalid_state",
+            )
+    else:
+        raise DataMigrationError(
+            f"Unable to determine auditor country. Unknown code: {auditor_country}",
+            "invalid_country",
+        )
+
+    return general_information
+
+
+def xform_auditee_fiscal_period_end(general_information):
+    """Transforms the fiscal period end from Census format to FAC format."""
+    # Transformation to be documented.
+    if general_information.get("auditee_fiscal_period_end"):
+        general_information["auditee_fiscal_period_end"] = (
+            xform_census_date_to_datetime(
+                general_information.get("auditee_fiscal_period_end")
+            ).strftime("%Y-%m-%d")
+        )
+    else:
+        raise DataMigrationError(
+            "Auditee fiscal period end is empty.",
+            "invalid_auditee_fiscal_period_end",
+        )
+
+    return general_information
+
+
+def xform_auditee_fiscal_period_start(general_information):
+    """Constructs the fiscal period start from the fiscal period end."""
+    # Transformation to be documented.
+    fiscal_start_date = xform_census_date_to_datetime(
+        general_information.get("auditee_fiscal_period_end")
+    ) - timedelta(days=365)
+    general_information["auditee_fiscal_period_start"] = fiscal_start_date.strftime(
+        "%Y-%m-%d"
+    )
+
+    return general_information
+
+
+def xform_audit_period_covered(general_information):
+    """Transforms the period covered from Census format to FAC format."""
+    # Transformation recorded.
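A quick worked example of the fiscal-period derivation above, with hypothetical dates: the start date is the parsed end date minus 365 days, which lands exactly one calendar year back unless a February 29 falls inside the span.

from datetime import datetime, timedelta

end = datetime(2022, 6, 30)            # parsed fiscal period end
start = end - timedelta(days=365)
print(start.strftime("%Y-%m-%d"))      # 2021-06-30
# A span containing Feb 29 lands one day past the calendar-year boundary:
# end 2024-06-30 -> start 2023-07-01.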
+ if general_information.get("audit_period_covered"): + value_in_db = general_information["audit_period_covered"] + general_information["audit_period_covered"] = _period_covered( + value_in_db.upper() + ) + track_transformations( + "PERIODCOVERED", + value_in_db, + "audit_period_covered", + general_information["audit_period_covered"], + "xform_audit_period_covered", + ) + else: + raise DataMigrationError( + f"Audit period covered is empty: {general_information.get('audit_period_covered')}", + "invalid_audit_period_covered", + ) + return general_information + + +def xform_audit_type(general_information): + """Transforms the audit type from Census format to FAC format.""" + # Transformation recorded. + if general_information.get("audit_type"): + value_in_db = general_information["audit_type"] + audit_type = _census_audit_type(value_in_db.upper()) + if audit_type == AUDIT_TYPE_DICT["A"]: + audit_type = settings.GSA_MIGRATION + AceFlag.set_ace_report_flag(True) + general_information["audit_type"] = audit_type + track_transformations( + "AUDITTYPE", + value_in_db, + "audit_type", + general_information["audit_type"], + "xform_audit_type", + ) + else: + raise DataMigrationError( + f"Audit type is empty: {general_information.get('audit_type')}", + "invalid_audit_type", + ) + return general_information + + +def xform_replace_empty_auditor_email(general_information): + """Replaces empty auditor email with GSA Migration keyword""" + # Transformation recorded. + if not general_information.get("auditor_email"): + general_information["auditor_email"] = settings.GSA_MIGRATION + track_transformations( + "CPAEMAIL", + "", + "auditor_email", + general_information["auditor_email"], + "xform_replace_empty_auditor_email", + ) + return general_information + + +def xform_replace_empty_auditee_email(general_information): + """Replaces empty auditee email with GSA Migration keyword""" + # Transformation recorded. + if not general_information.get("auditee_email"): + general_information["auditee_email"] = settings.GSA_MIGRATION + track_transformations( + "AUDITEEEMAIL", + "", + "auditee_email", + general_information["auditee_email"], + "xform_replace_empty_auditee_email", + ) + return general_information + + +def xform_replace_empty_auditee_contact_name(general_information): + """Replaces empty auditee contact name with GSA Migration keyword""" + # Transformation recorded. + if not general_information.get("auditee_contact_name"): + general_information["auditee_contact_name"] = settings.GSA_MIGRATION + track_transformations( + "AUDITEECONTACT", + "", + "auditee_contact_name", + general_information["auditee_contact_name"], + "xform_replace_empty_auditee_contact_name", + ) + return general_information + + +def xform_replace_empty_auditee_contact_title(general_information): + """Replaces empty auditee contact title with GSA Migration keyword""" + # Transformation recorded. + if not general_information.get("auditee_contact_title"): + general_information["auditee_contact_title"] = settings.GSA_MIGRATION + track_transformations( + "AUDITEETITLE", + "", + "auditee_contact_title", + general_information["auditee_contact_title"], + "xform_replace_empty_auditee_contact_title", + ) + return general_information + + +def xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration(audit_header): + """Replaces empty or invalid auditee UEI with GSA Migration keyword""" + # Transformation recorded. 
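The xform_replace_empty_* functions above all share one shape; here is a hedged sketch of that shape as a single helper. The helper name and its parameters are hypothetical and not part of this module.

def _replace_empty_with_gsa_migration(general_information, census_column, gsa_field, caller):
    # Hypothetical consolidation of the replacement pattern used above.
    if not general_information.get(gsa_field):
        general_information[gsa_field] = settings.GSA_MIGRATION
        track_transformations(census_column, "", gsa_field, settings.GSA_MIGRATION, caller)
    return general_information

# Usage sketch:
# _replace_empty_with_gsa_migration(gi, "CPAEMAIL", "auditor_email",
#                                   "xform_replace_empty_auditor_email")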
+ if not (audit_header.UEI and is_uei_valid(audit_header.UEI)): + track_transformations( + "UEI", + audit_header.UEI, + "auditee_uei", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration", + ) + audit_header.UEI = settings.GSA_MIGRATION + + return audit_header + + +def track_transformations( + census_column, census_value, gsa_field, gsa_value, transformation_functions +): + """Tracks all transformations related to the general information data.""" + census_data = [CensusRecord(column=census_column, value=census_value).to_dict()] + gsa_fac_data = GsaFacRecord(field=gsa_field, value=gsa_value).to_dict() + function_names = transformation_functions.split(",") + InspectionRecord.append_general_changes( + { + "census_data": census_data, + "gsa_fac_data": gsa_fac_data, + "transformation_functions": function_names, + } + ) + + +def xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(general_information): + """Replaces empty or invalid auditor EIN with GSA Migration keyword""" + # Transformation recorded. + if not ( + general_information.get("auditor_ein") + and re.match( + settings.EMPLOYER_IDENTIFICATION_NUMBER, + general_information.get("auditor_ein"), + ) + ): + track_transformations( + "AUDITOR_EIN", + general_information.get("auditor_ein"), + "auditor_ein", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration", + ) + general_information["auditor_ein"] = settings.GSA_MIGRATION + + return general_information + + +def xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(general_information): + """Replaces empty or invalid auditee EIN with GSA Migration keyword""" + # Transformation recorded. + if not ( + general_information.get("ein") + and re.match( + settings.EMPLOYER_IDENTIFICATION_NUMBER, general_information.get("ein") + ) + ): + track_transformations( + "EIN", + general_information.get("ein"), + "auditee_ein", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration", + ) + general_information["ein"] = settings.GSA_MIGRATION + + return general_information + + +def xform_audit_period_other_months(general_information, audit_header): + """ + This method addresses cases where the reporting period spans a non-standard duration, ensuring that + the total number of months covered is accurately accounted for. This is applicable in scenarios + where the covered period could span any number of months, rather than fixed annual or biennial periods. 
+ """ + if string_to_string(audit_header.PERIODCOVERED) == "O": + general_information["audit_period_other_months"] = str( + audit_header.NUMBERMONTHS + ).zfill(2) + + +def xform_replace_empty_zips(general_information): + """Replaces empty auditor and auditee zipcodes with GSA Migration keyword""" + auditor_zip = general_information.get("auditor_zip") + is_usa_auditor = general_information.get("auditor_country") != "non-USA" + + if is_usa_auditor: + if not auditor_zip: + new_auditor_zip = settings.GSA_MIGRATION + else: + new_auditor_zip = xform_remove_hyphen_and_pad_zip(auditor_zip) + + if new_auditor_zip != auditor_zip: + track_transformations( + "CPAZIPCODE", + auditor_zip, + "auditor_zip", + new_auditor_zip, + "xform_replace_empty_zips", + ) + general_information["auditor_zip"] = new_auditor_zip + + auditee_zip = general_information.get("auditee_zip") + if not auditee_zip: + new_auditee_zip = settings.GSA_MIGRATION + else: + new_auditee_zip = xform_remove_hyphen_and_pad_zip(auditee_zip) + + if new_auditee_zip != auditee_zip: + track_transformations( + "ZIPCODE", + auditee_zip, + "auditee_zip", + new_auditee_zip, + "xform_replace_empty_zips", + ) + general_information["auditee_zip"] = new_auditee_zip + + return general_information + + +def general_information(audit_header): + """Generates general information JSON.""" + xform_update_multiple_eins_flag(audit_header) + xform_update_multiple_ueis_flag(audit_header) + xform_update_entity_type(audit_header) + xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration(audit_header) + general_information = create_json_from_db_object(audit_header, mappings) + xform_audit_period_other_months(general_information, audit_header) + transformations = [ + xform_auditee_fiscal_period_start, + xform_auditee_fiscal_period_end, + xform_country_v2, + xform_audit_period_covered, + xform_audit_type, + xform_replace_empty_auditor_email, + xform_replace_empty_auditee_email, + xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration, + xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration, + xform_replace_empty_zips, + xform_replace_empty_auditee_contact_name, + xform_replace_empty_auditee_contact_title, + ] + + for transform in transformations: + if transform == xform_country_v2: + general_information = transform(general_information, audit_header) + else: + general_information = transform(general_information) + + audit.validators.validate_general_information_complete_json(general_information) + + return general_information diff --git a/backend/census_historical_migration/sac_general_lib/report_id_generator.py b/backend/census_historical_migration/sac_general_lib/report_id_generator.py index 8f478d25da..4accafe547 100644 --- a/backend/census_historical_migration/sac_general_lib/report_id_generator.py +++ b/backend/census_historical_migration/sac_general_lib/report_id_generator.py @@ -1,16 +1,16 @@ -from ..transforms.xform_string_to_string import string_to_string -from ..sac_general_lib.utils import ( - xform_census_date_to_datetime, -) - - -def xform_dbkey_to_report_id(audit_header): - """Constructs the report ID from the DBKEY. 
- The report ID is constructed as follows: - YYYY-MM-CENSUS-DBKEY.""" - dbkey = string_to_string(audit_header.DBKEY) - year = string_to_string(audit_header.AUDITYEAR) - fy_end_date = string_to_string(audit_header.FYENDDATE) - dt = xform_census_date_to_datetime(fy_end_date) - - return f"{year}-{dt.month:02}-CENSUS-{dbkey.zfill(10)}" +from ..transforms.xform_string_to_string import string_to_string +from ..sac_general_lib.utils import ( + xform_census_date_to_datetime, +) + + +def xform_dbkey_to_report_id(audit_header): + """Constructs the report ID from the DBKEY. + The report ID is constructed as follows: + YYYY-MM-CENSUS-DBKEY.""" + dbkey = string_to_string(audit_header.DBKEY) + year = string_to_string(audit_header.AUDITYEAR) + fy_end_date = string_to_string(audit_header.FYENDDATE) + dt = xform_census_date_to_datetime(fy_end_date) + + return f"{year}-{dt.month:02}-CENSUS-{dbkey.zfill(10)}" diff --git a/backend/census_historical_migration/sac_general_lib/sac_creator.py b/backend/census_historical_migration/sac_general_lib/sac_creator.py index 438a051e16..404e35f7b3 100644 --- a/backend/census_historical_migration/sac_general_lib/sac_creator.py +++ b/backend/census_historical_migration/sac_general_lib/sac_creator.py @@ -1,101 +1,101 @@ -import logging - -from django.apps import apps -from django.conf import settings - -from .cognizant_oversight import cognizant_oversight - -from ..exception_utils import DataMigrationError -from .general_information import ( - general_information, -) -from .audit_information import ( - audit_information, -) -from .auditee_certification import ( - auditee_certification, -) -from .auditor_certification import ( - auditor_certification, -) -from .report_id_generator import ( - xform_dbkey_to_report_id, -) -from ..transforms.xform_string_to_string import ( - string_to_string, -) - -logger = logging.getLogger(__name__) - - -def setup_sac(user, audit_header): - """Create a SAC object for the historic data migration.""" - if user is None: - raise DataMigrationError( - "No user provided to setup sac object", - "invalid_user", - ) - - logger.info(f"Creating a SAC object for {user}") - - SingleAuditChecklist = apps.get_model("audit.SingleAuditChecklist") - generated_report_id = xform_dbkey_to_report_id(audit_header) - - try: - exists = SingleAuditChecklist.objects.get(report_id=generated_report_id) - except SingleAuditChecklist.DoesNotExist: - exists = None - if exists: - exists.delete() - - general_info = general_information(audit_header) - - sac = SingleAuditChecklist.objects.create( - submitted_by=user, - general_information=general_info, - audit_information=audit_information(audit_header), - audit_type=general_info["audit_type"], - ) - - sac.report_id = generated_report_id - Access = apps.get_model("audit.Access") - Access.objects.create( - sac=sac, - user=user, - email=user.email, - role="editor", - ) - - # We need these to be different. 
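A worked example of the report ID produced by xform_dbkey_to_report_id above, with hypothetical values; setup_sac uses this same ID as its idempotency key.

year, dbkey = "2022", "123456"
month = 6                                             # from the parsed FYENDDATE
print(f"{year}-{month:02}-CENSUS-{dbkey.zfill(10)}")  # 2022-06-CENSUS-0000123456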
- Access.objects.create( - sac=sac, - user=user, - email=user.email, - role="certifying_auditee_contact", - ) - Access.objects.create( - sac=sac, - user=user, - email="fac-census-migration-auditor-official@fac.gsa.gov", - role="certifying_auditor_contact", - ) - - sac.auditee_certification = auditee_certification(audit_header) - sac.auditor_certification = auditor_certification(audit_header) - cognizant, oversight = cognizant_oversight(audit_header) - sac.cognizant_agency = cognizant - sac.oversight_agency = oversight - sac.data_source = settings.CENSUS_DATA_SOURCE - - if general_info["user_provided_organization_type"] == "tribal": - suppression_code = string_to_string(audit_header.SUPPRESSION_CODE).upper() - sac.tribal_data_consent = { - "tribal_authorization_certifying_official_title": settings.GSA_MIGRATION, - "is_tribal_information_authorized_to_be_public": suppression_code != "IT", - "tribal_authorization_certifying_official_name": settings.GSA_MIGRATION, - } - - sac.save() - logger.info("Created single audit checklist %s", sac) - - return sac +import logging + +from django.apps import apps +from django.conf import settings + +from .cognizant_oversight import cognizant_oversight + +from ..exception_utils import DataMigrationError +from .general_information import ( + general_information, +) +from .audit_information import ( + audit_information, +) +from .auditee_certification import ( + auditee_certification, +) +from .auditor_certification import ( + auditor_certification, +) +from .report_id_generator import ( + xform_dbkey_to_report_id, +) +from ..transforms.xform_string_to_string import ( + string_to_string, +) + +logger = logging.getLogger(__name__) + + +def setup_sac(user, audit_header): + """Create a SAC object for the historic data migration.""" + if user is None: + raise DataMigrationError( + "No user provided to setup sac object", + "invalid_user", + ) + + logger.info(f"Creating a SAC object for {user}") + + SingleAuditChecklist = apps.get_model("audit.SingleAuditChecklist") + generated_report_id = xform_dbkey_to_report_id(audit_header) + + try: + exists = SingleAuditChecklist.objects.get(report_id=generated_report_id) + except SingleAuditChecklist.DoesNotExist: + exists = None + if exists: + exists.delete() + + general_info = general_information(audit_header) + + sac = SingleAuditChecklist.objects.create( + submitted_by=user, + general_information=general_info, + audit_information=audit_information(audit_header), + audit_type=general_info["audit_type"], + ) + + sac.report_id = generated_report_id + Access = apps.get_model("audit.Access") + Access.objects.create( + sac=sac, + user=user, + email=user.email, + role="editor", + ) + + # We need these to be different. 
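The existence check near the top of setup_sac (a get() wrapped in try/except, followed by a delete) could equivalently use a filtered queryset; a minimal sketch against the same model. filter().delete() is a no-op when nothing matches, so no exception handling is needed.

# Equivalent idempotency step: drop any earlier migration of this report.
SingleAuditChecklist.objects.filter(report_id=generated_report_id).delete()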
+ Access.objects.create( + sac=sac, + user=user, + email=user.email, + role="certifying_auditee_contact", + ) + Access.objects.create( + sac=sac, + user=user, + email="fac-census-migration-auditor-official@fac.gsa.gov", + role="certifying_auditor_contact", + ) + + sac.auditee_certification = auditee_certification(audit_header) + sac.auditor_certification = auditor_certification(audit_header) + cognizant, oversight = cognizant_oversight(audit_header) + sac.cognizant_agency = cognizant + sac.oversight_agency = oversight + sac.data_source = settings.CENSUS_DATA_SOURCE + + if general_info["user_provided_organization_type"] == "tribal": + suppression_code = string_to_string(audit_header.SUPPRESSION_CODE).upper() + sac.tribal_data_consent = { + "tribal_authorization_certifying_official_title": settings.GSA_MIGRATION, + "is_tribal_information_authorized_to_be_public": suppression_code != "IT", + "tribal_authorization_certifying_official_name": settings.GSA_MIGRATION, + } + + sac.save() + logger.info("Created single audit checklist %s", sac) + + return sac diff --git a/backend/census_historical_migration/sac_general_lib/utils.py b/backend/census_historical_migration/sac_general_lib/utils.py index a1a05ba1fb..26d1624976 100644 --- a/backend/census_historical_migration/sac_general_lib/utils.py +++ b/backend/census_historical_migration/sac_general_lib/utils.py @@ -1,103 +1,103 @@ -import logging -from datetime import datetime, timezone -import sys - -from census_historical_migration.report_type_flag import AceFlag - -from ..transforms.xform_string_to_string import ( - string_to_string, -) -from ..transforms.xform_string_to_int import string_to_int -from ..transforms.xform_string_to_bool import string_to_bool -from ..exception_utils import DataMigrationValueError - -logger = logging.getLogger(__name__) - - -def create_json_from_db_object(gobj, mappings): - """Constructs a JSON object from a database object using a list of mappings.""" - json_obj = {} - for mapping in mappings: - if mapping.in_db: - value = getattr(gobj, mapping.in_db, mapping.default) - else: - value = mapping.default - # Fields with a value of None are skipped to maintain consistency with the logic - # used in workbook data extraction and to prevent converting None into the string "None". 
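A self-contained illustration of the mapping-driven extraction in create_json_from_db_object; the record here is a hypothetical stand-in for a database row.

from types import SimpleNamespace

gobj = SimpleNamespace(CPACITY="Springfield")   # hypothetical stand-in for a DB row
in_form, in_db, default = "auditor_city", "CPACITY", None
value = getattr(gobj, in_db, default) if in_db else default
json_obj = {in_form: str(value)}                # {'auditor_city': 'Springfield'}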
- if value is None or (value == "" and AceFlag.get_ace_report_flag()): - continue - # Check for the type and apply the correct conversion method - if mapping.type is str: - value = string_to_string(value) - elif mapping.type is bool: - value = string_to_bool(value) - elif mapping.type is int: - value = string_to_int(value) - else: - value = mapping.type(value) - json_obj[mapping.in_form] = value - - return json_obj - - -def xform_census_date_to_datetime(date_string): - """Convert a census date string from '%m/%d/%Y %H:%M:%S' format to 'YYYY-MM-DD' format.""" - # Parse the string into a datetime object - formats = ["%m/%d/%Y %H:%M:%S", "%Y-%m-%d %H:%M:%S"] - for fmt in formats: - try: - dt = datetime.strptime(date_string, fmt) - return dt.date() - except ValueError: - continue - # Raise a custom exception or a ValueError with a descriptive message - raise DataMigrationValueError( - f"Date string '{date_string}' is not in the expected formats: '%m/%d/%Y %H:%M:%S' or 'YYYY-MM-DD HH:MM:SS'", - "invalid_date", - ) - - -def xform_census_date_to_utc_time(date_string): - """Convert a date string from '%m/%d/%Y %H:%M:%S' format to 'YYYY-MM-DD HH:MM:SS.ffffff±HH' format.""" - formats = ["%m/%d/%Y %H:%M:%S", "%Y-%m-%d %H:%M:%S"] - for fmt in formats: - try: - dt = datetime.strptime(date_string, fmt) - dt = dt.replace(tzinfo=timezone.utc) - return dt - except ValueError: - continue - - raise DataMigrationValueError( - f"Date string '{date_string}' is not in the expected formats: '%m/%d/%Y %H:%M:%S' or 'YYYY-MM-DD HH:MM:SS'", - "invalid_date", - ) - - return dt - - -def normalize_year_string_or_exit(year_string): - """ - Normalizes a year string to a four-digit year format or exits the program if the year string is invalid. - """ - # Do not use this function elsewhere to normilize year strings - # or it will quit the program if the year is invalid. - # The goal here is to quickly fail django commands if the year is invalid. - try: - year = int(year_string) - except ValueError: - logger.error("Invalid year string.") - sys.exit(-1) - - if 16 <= year < 23: - return str(year + 2000) - elif 2016 <= year < 2023: - return year_string - else: - logger.error("Invalid year string. Audit year must be between 2016 and 2022") - sys.exit(-1) - - -def is_single_word(s): - """Returns True if the string is a single word, False otherwise.""" - return len(s.split()) == 1 +import logging +from datetime import datetime, timezone +import sys + +from census_historical_migration.report_type_flag import AceFlag + +from ..transforms.xform_string_to_string import ( + string_to_string, +) +from ..transforms.xform_string_to_int import string_to_int +from ..transforms.xform_string_to_bool import string_to_bool +from ..exception_utils import DataMigrationValueError + +logger = logging.getLogger(__name__) + + +def create_json_from_db_object(gobj, mappings): + """Constructs a JSON object from a database object using a list of mappings.""" + json_obj = {} + for mapping in mappings: + if mapping.in_db: + value = getattr(gobj, mapping.in_db, mapping.default) + else: + value = mapping.default + # Fields with a value of None are skipped to maintain consistency with the logic + # used in workbook data extraction and to prevent converting None into the string "None". 
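The census date helpers in this file accept two historic formats; a quick illustration with hypothetical values, mirroring the try-each-format loop used by xform_census_date_to_datetime.

from datetime import datetime

for s in ("06/30/2022 00:00:00", "2022-06-30 00:00:00"):
    for fmt in ("%m/%d/%Y %H:%M:%S", "%Y-%m-%d %H:%M:%S"):
        try:
            print(datetime.strptime(s, fmt).date())  # 2022-06-30 for both inputs
            break
        except ValueError:
            continue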
+        if value is None or (value == "" and AceFlag.get_ace_report_flag()):
+            continue
+        # Check for the type and apply the correct conversion method
+        if mapping.type is str:
+            value = string_to_string(value)
+        elif mapping.type is bool:
+            value = string_to_bool(value)
+        elif mapping.type is int:
+            value = string_to_int(value)
+        else:
+            value = mapping.type(value)
+        json_obj[mapping.in_form] = value
+
+    return json_obj
+
+
+def xform_census_date_to_datetime(date_string):
+    """Convert a census date string in 'MM/DD/YYYY HH:MM:SS' or 'YYYY-MM-DD HH:MM:SS' format to a datetime.date."""
+    # Parse the string into a datetime object
+    formats = ["%m/%d/%Y %H:%M:%S", "%Y-%m-%d %H:%M:%S"]
+    for fmt in formats:
+        try:
+            dt = datetime.strptime(date_string, fmt)
+            return dt.date()
+        except ValueError:
+            continue
+    # Raise a custom exception with a descriptive message
+    raise DataMigrationValueError(
+        f"Date string '{date_string}' is not in the expected formats: 'MM/DD/YYYY HH:MM:SS' or 'YYYY-MM-DD HH:MM:SS'",
+        "invalid_date",
+    )
+
+
+def xform_census_date_to_utc_time(date_string):
+    """Convert a census date string in 'MM/DD/YYYY HH:MM:SS' or 'YYYY-MM-DD HH:MM:SS' format to a timezone-aware UTC datetime."""
+    formats = ["%m/%d/%Y %H:%M:%S", "%Y-%m-%d %H:%M:%S"]
+    for fmt in formats:
+        try:
+            dt = datetime.strptime(date_string, fmt)
+            dt = dt.replace(tzinfo=timezone.utc)
+            return dt
+        except ValueError:
+            continue
+
+    raise DataMigrationValueError(
+        f"Date string '{date_string}' is not in the expected formats: 'MM/DD/YYYY HH:MM:SS' or 'YYYY-MM-DD HH:MM:SS'",
+        "invalid_date",
+    )
+
+
+def normalize_year_string_or_exit(year_string):
+    """
+    Normalizes a year string to a four-digit year format or exits the program if the year string is invalid.
+    """
+    # Do not use this function elsewhere to normalize year strings,
+    # or it will quit the program if the year is invalid.
+    # The goal here is to quickly fail Django commands when the year is invalid.
+    try:
+        year = int(year_string)
+    except ValueError:
+        logger.error("Invalid year string.")
+        sys.exit(-1)
+
+    if 16 <= year < 23:
+        return str(year + 2000)
+    elif 2016 <= year < 2023:
+        return year_string
+    else:
+        logger.error("Invalid year string. 
Audit year must be between 2016 and 2022") + sys.exit(-1) + + +def is_single_word(s): + """Returns True if the string is a single word, False otherwise.""" + return len(s.split()) == 1 diff --git a/backend/census_historical_migration/test_additional_eins_xforms.py b/backend/census_historical_migration/test_additional_eins_xforms.py index f8eea8e473..3f4adb076b 100644 --- a/backend/census_historical_migration/test_additional_eins_xforms.py +++ b/backend/census_historical_migration/test_additional_eins_xforms.py @@ -1,21 +1,21 @@ -from django.test import SimpleTestCase - -from .workbooklib.additional_eins import xform_remove_trailing_decimal_zero -from .exception_utils import DataMigrationValueError - - -class TestXformRemoveTrailingDecimalZero(SimpleTestCase): - def test_with_trailing_decimal_zero(self): - self.assertEqual(xform_remove_trailing_decimal_zero("12345.0"), "12345") - self.assertEqual(xform_remove_trailing_decimal_zero("67890.0"), "67890") - - def test_without_trailing_decimal_zero(self): - self.assertEqual(xform_remove_trailing_decimal_zero("12345"), "12345") - with self.assertRaises(DataMigrationValueError): - xform_remove_trailing_decimal_zero("67890.5") - - def test_empty_string(self): - self.assertEqual(xform_remove_trailing_decimal_zero(""), "") - - def test_non_string_input(self): - self.assertEqual(xform_remove_trailing_decimal_zero(None), "") +from django.test import SimpleTestCase + +from .workbooklib.additional_eins import xform_remove_trailing_decimal_zero +from .exception_utils import DataMigrationValueError + + +class TestXformRemoveTrailingDecimalZero(SimpleTestCase): + def test_with_trailing_decimal_zero(self): + self.assertEqual(xform_remove_trailing_decimal_zero("12345.0"), "12345") + self.assertEqual(xform_remove_trailing_decimal_zero("67890.0"), "67890") + + def test_without_trailing_decimal_zero(self): + self.assertEqual(xform_remove_trailing_decimal_zero("12345"), "12345") + with self.assertRaises(DataMigrationValueError): + xform_remove_trailing_decimal_zero("67890.5") + + def test_empty_string(self): + self.assertEqual(xform_remove_trailing_decimal_zero(""), "") + + def test_non_string_input(self): + self.assertEqual(xform_remove_trailing_decimal_zero(None), "") diff --git a/backend/census_historical_migration/test_audit_information_xforms.py b/backend/census_historical_migration/test_audit_information_xforms.py index a5f65c4691..b9e8baf857 100644 --- a/backend/census_historical_migration/test_audit_information_xforms.py +++ b/backend/census_historical_migration/test_audit_information_xforms.py @@ -1,282 +1,282 @@ -from unittest.mock import MagicMock, patch -from django.test import SimpleTestCase - -from .sac_general_lib.audit_information import ( - ace_audit_information, - xform_build_sp_framework_gaap_results, - xform_framework_basis, - xform_sp_framework_required, - xform_lowrisk, -) -from .exception_utils import DataMigrationError -from django.conf import settings - - -class TestXformBuildSpFrameworkGaapResults(SimpleTestCase): - class MockAuditHeader: - def __init__( - self, - DBKEY, - TYPEREPORT_FS, - SP_FRAMEWORK_REQUIRED, - TYPEREPORT_SP_FRAMEWORK, - SP_FRAMEWORK, - ): - self.DBKEY = DBKEY - self.TYPEREPORT_FS = TYPEREPORT_FS - self.SP_FRAMEWORK_REQUIRED = SP_FRAMEWORK_REQUIRED - self.TYPEREPORT_SP_FRAMEWORK = TYPEREPORT_SP_FRAMEWORK - self.SP_FRAMEWORK = SP_FRAMEWORK - - def _mock_audit_header(self): - """Returns a mock audit header with all necessary fields.""" - return self.MockAuditHeader( - DBKEY="123456789", - TYPEREPORT_FS="UQADS", - 
SP_FRAMEWORK_REQUIRED="Y", - TYPEREPORT_SP_FRAMEWORK="UQAD", - SP_FRAMEWORK="cash", - ) - - def test_normal_operation_with_all_opinions(self): - """Test that the function returns the correct results when all opinions are present.""" - audit_header = self._mock_audit_header() - result = xform_build_sp_framework_gaap_results(audit_header) - self.assertIn("unmodified_opinion", result["gaap_results"]) - self.assertIn("qualified_opinion", result["gaap_results"]) - self.assertIn("adverse_opinion", result["gaap_results"]) - self.assertIn("disclaimer_of_opinion", result["gaap_results"]) - self.assertTrue(result["is_sp_framework_required"]) - self.assertIn("cash_basis", result["sp_framework_basis"]) - - def test_missing_some_sp_framework_details(self): - """Test that the function returns the correct results when some purpose framework details are missing.""" - audit_header = self._mock_audit_header() - audit_header.TYPEREPORT_FS = "UQ" - result = xform_build_sp_framework_gaap_results(audit_header) - self.assertIn("unmodified_opinion", result["gaap_results"]) - self.assertIn("qualified_opinion", result["gaap_results"]) - self.assertNotIn("adverse_opinion", result["gaap_results"]) - self.assertNotIn("disclaimer_of_opinion", result["gaap_results"]) - self.assertNotIn("is_sp_framework_required", result) - self.assertNotIn("sp_framework_basis", result) - - def test_missing_all_sp_framework_details(self): - """Test that the function raises an exception when all special purpose framework details are missing.""" - audit_header = self._mock_audit_header() - audit_header.TYPEREPORT_FS = "" - with self.assertRaises(DataMigrationError): - xform_build_sp_framework_gaap_results(audit_header) - - def test_incorrect_framework_basis(self): - """Test that the function raises an exception when the special purpose framework basis is incorrect.""" - audit_header = self._mock_audit_header() - audit_header.SP_FRAMEWORK = "incorrect_basis" - with self.assertRaises(DataMigrationError): - xform_build_sp_framework_gaap_results(audit_header) - - -class TestXformFrameworkBasis(SimpleTestCase): - def test_valid_bases(self): - """Test that the function returns the correct results for each basis in lower cases.""" - self.assertEqual(xform_framework_basis("cash"), "cash_basis") - self.assertEqual(xform_framework_basis("contractual"), "contractual_basis") - self.assertEqual(xform_framework_basis("regulatory"), "regulatory_basis") - self.assertEqual(xform_framework_basis("tax"), "tax_basis") - self.assertEqual(xform_framework_basis("other"), "other_basis") - - def test_basis_with_upper_cases(self): - """Test that the function returns the correct results for each basis in upper cases.""" - self.assertEqual(xform_framework_basis("CASH"), "cash_basis") - self.assertEqual( - xform_framework_basis("OTHER"), - "other_basis", - ) - self.assertEqual(xform_framework_basis("REGULATORY"), "regulatory_basis") - self.assertEqual(xform_framework_basis("TAX"), "tax_basis") - self.assertEqual( - xform_framework_basis("CONTRACTUAL"), - "contractual_basis", - ) - - def test_invalid_basis(self): - """Test that the function raises an exception when the basis is invalid.""" - with self.assertRaises(DataMigrationError): - xform_framework_basis("equity") - - def test_empty_string(self): - """Test that the function raises an exception when the basis is an empty string.""" - with self.assertRaises(DataMigrationError): - xform_framework_basis("") - - def test_none_input(self): - """Test that the function raises an exception when the basis is None.""" - with 
self.assertRaises(DataMigrationError): - xform_framework_basis(None) - - def test_basis_with_multiple_spaces(self): - """Test that the function returns the correct results when the basis has multiple spaces.""" - self.assertEqual(xform_framework_basis(" cash "), "cash_basis") - self.assertEqual( - xform_framework_basis(" other "), - "other_basis", - ) - with self.assertRaises(DataMigrationError): - xform_framework_basis(" unknown ") - - def test_basis_with_extra_words(self): - """Test that the function raises an exception when the basis has extra words.""" - with self.assertRaises(DataMigrationError): - xform_framework_basis("Something with cash basis") - - -class TestXformSpFrameworkRequired(SimpleTestCase): - class MockAuditHeader: - def __init__( - self, - DBKEY, - TYPEREPORT_FS, - SP_FRAMEWORK_REQUIRED, - ): - self.DBKEY = DBKEY - self.TYPEREPORT_FS = TYPEREPORT_FS - self.SP_FRAMEWORK_REQUIRED = SP_FRAMEWORK_REQUIRED - - def _mock_audit_header(self): - """Returns a mock audit header with all necessary fields.""" - return self.MockAuditHeader( - DBKEY="123456789", - TYPEREPORT_FS="AS", - SP_FRAMEWORK_REQUIRED="Y", - ) - - def test_sp_framework_required_Y(self): - """Test that the function returns 'Y' for SP_FRAMEWORK_REQUIRED.""" - audit_header = self._mock_audit_header() - xform_sp_framework_required(audit_header) - self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, "Y") - - def test_sp_framework_required_N(self): - """Test that the function returns 'N' for SP_FRAMEWORK_REQUIRED.""" - audit_header = self._mock_audit_header() - audit_header.SP_FRAMEWORK_REQUIRED = "N" - xform_sp_framework_required(audit_header) - self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, "N") - - def test_sp_framework_required_blank(self): - """Test that the function returns GSA_MIGRATION for SP_FRAMEWORK_REQUIRED.""" - audit_header = self._mock_audit_header() - audit_header.SP_FRAMEWORK_REQUIRED = "" - xform_sp_framework_required(audit_header) - self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, settings.GSA_MIGRATION) - - -class TestXformLowrisk(SimpleTestCase): - class MockAuditHeader: - def __init__( - self, - DBKEY, - LOWRISK, - ): - self.DBKEY = DBKEY - self.LOWRISK = LOWRISK - - def _mock_audit_header(self): - """Returns a mock audit header with all necessary fields.""" - return self.MockAuditHeader( - DBKEY="123456789", - LOWRISK="Y", - ) - - def test_lowrisk_Y(self): - """Test that the function returns 'Y' for LOWRISK.""" - audit_header = self._mock_audit_header() - xform_lowrisk(audit_header) - self.assertEqual(audit_header.LOWRISK, "Y") - - def test_lowrisk_N(self): - """Test that the function returns 'N' for LOWRISK.""" - audit_header = self._mock_audit_header() - audit_header.LOWRISK = "N" - xform_lowrisk(audit_header) - self.assertEqual(audit_header.LOWRISK, "N") - - def test_lowrisk_blank(self): - """Test that the function returns GSA_MIGRATION for LOWRISK.""" - audit_header = self._mock_audit_header() - audit_header.LOWRISK = "" - xform_lowrisk(audit_header) - self.assertEqual(audit_header.LOWRISK, settings.GSA_MIGRATION) - - -class TestAceAuditInformation(SimpleTestCase): - - def setUp(self): - self.audit_header = MagicMock() - self.audit_header.some_field = "test_value" - - self.default_values = { - "dollar_threshold": settings.GSA_MIGRATION_INT, - "gaap_results": [settings.GSA_MIGRATION], - "is_going_concern_included": settings.GSA_MIGRATION, - "is_internal_control_deficiency_disclosed": settings.GSA_MIGRATION, - "is_internal_control_material_weakness_disclosed": settings.GSA_MIGRATION, 
- "is_material_noncompliance_disclosed": settings.GSA_MIGRATION, - "is_aicpa_audit_guide_included": settings.GSA_MIGRATION, - "is_low_risk_auditee": settings.GSA_MIGRATION, - "agencies": [settings.GSA_MIGRATION], - } - - @patch( - "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" - ) - def test_ace_audit_information_with_actual_values(self, mock_create_json): - """Test that the function returns the correct values when all fields are present.""" - mock_create_json.return_value = { - "dollar_threshold": 1000, - "is_low_risk_auditee": True, - "new_field": "new_value", - } - - result = ace_audit_information(self.audit_header) - - expected_result = self.default_values.copy() - expected_result.update( - { - "dollar_threshold": 1000, - "is_low_risk_auditee": True, - "new_field": "new_value", - } - ) - - self.assertEqual(result, expected_result) - - @patch( - "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" - ) - def test_ace_audit_information_with_default_values(self, mock_create_json): - """Test that the function returns the correct values when all fields are missing.""" - mock_create_json.return_value = {} - - result = ace_audit_information(self.audit_header) - - expected_result = self.default_values - - self.assertEqual(result, expected_result) - - @patch( - "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" - ) - def test_ace_audit_information_with_mixed_values(self, mock_create_json): - """Test that the function returns the correct values when some fields are missing.""" - mock_create_json.return_value = { - "dollar_threshold": None, - "is_low_risk_auditee": True, - } - - result = ace_audit_information(self.audit_header) - - expected_result = self.default_values.copy() - expected_result.update({"is_low_risk_auditee": True}) - - self.assertEqual(result, expected_result) +from unittest.mock import MagicMock, patch +from django.test import SimpleTestCase + +from .sac_general_lib.audit_information import ( + ace_audit_information, + xform_build_sp_framework_gaap_results, + xform_framework_basis, + xform_sp_framework_required, + xform_lowrisk, +) +from .exception_utils import DataMigrationError +from django.conf import settings + + +class TestXformBuildSpFrameworkGaapResults(SimpleTestCase): + class MockAuditHeader: + def __init__( + self, + DBKEY, + TYPEREPORT_FS, + SP_FRAMEWORK_REQUIRED, + TYPEREPORT_SP_FRAMEWORK, + SP_FRAMEWORK, + ): + self.DBKEY = DBKEY + self.TYPEREPORT_FS = TYPEREPORT_FS + self.SP_FRAMEWORK_REQUIRED = SP_FRAMEWORK_REQUIRED + self.TYPEREPORT_SP_FRAMEWORK = TYPEREPORT_SP_FRAMEWORK + self.SP_FRAMEWORK = SP_FRAMEWORK + + def _mock_audit_header(self): + """Returns a mock audit header with all necessary fields.""" + return self.MockAuditHeader( + DBKEY="123456789", + TYPEREPORT_FS="UQADS", + SP_FRAMEWORK_REQUIRED="Y", + TYPEREPORT_SP_FRAMEWORK="UQAD", + SP_FRAMEWORK="cash", + ) + + def test_normal_operation_with_all_opinions(self): + """Test that the function returns the correct results when all opinions are present.""" + audit_header = self._mock_audit_header() + result = xform_build_sp_framework_gaap_results(audit_header) + self.assertIn("unmodified_opinion", result["gaap_results"]) + self.assertIn("qualified_opinion", result["gaap_results"]) + self.assertIn("adverse_opinion", result["gaap_results"]) + self.assertIn("disclaimer_of_opinion", result["gaap_results"]) + self.assertTrue(result["is_sp_framework_required"]) + self.assertIn("cash_basis", 
result["sp_framework_basis"]) + + def test_missing_some_sp_framework_details(self): + """Test that the function returns the correct results when some purpose framework details are missing.""" + audit_header = self._mock_audit_header() + audit_header.TYPEREPORT_FS = "UQ" + result = xform_build_sp_framework_gaap_results(audit_header) + self.assertIn("unmodified_opinion", result["gaap_results"]) + self.assertIn("qualified_opinion", result["gaap_results"]) + self.assertNotIn("adverse_opinion", result["gaap_results"]) + self.assertNotIn("disclaimer_of_opinion", result["gaap_results"]) + self.assertNotIn("is_sp_framework_required", result) + self.assertNotIn("sp_framework_basis", result) + + def test_missing_all_sp_framework_details(self): + """Test that the function raises an exception when all special purpose framework details are missing.""" + audit_header = self._mock_audit_header() + audit_header.TYPEREPORT_FS = "" + with self.assertRaises(DataMigrationError): + xform_build_sp_framework_gaap_results(audit_header) + + def test_incorrect_framework_basis(self): + """Test that the function raises an exception when the special purpose framework basis is incorrect.""" + audit_header = self._mock_audit_header() + audit_header.SP_FRAMEWORK = "incorrect_basis" + with self.assertRaises(DataMigrationError): + xform_build_sp_framework_gaap_results(audit_header) + + +class TestXformFrameworkBasis(SimpleTestCase): + def test_valid_bases(self): + """Test that the function returns the correct results for each basis in lower cases.""" + self.assertEqual(xform_framework_basis("cash"), "cash_basis") + self.assertEqual(xform_framework_basis("contractual"), "contractual_basis") + self.assertEqual(xform_framework_basis("regulatory"), "regulatory_basis") + self.assertEqual(xform_framework_basis("tax"), "tax_basis") + self.assertEqual(xform_framework_basis("other"), "other_basis") + + def test_basis_with_upper_cases(self): + """Test that the function returns the correct results for each basis in upper cases.""" + self.assertEqual(xform_framework_basis("CASH"), "cash_basis") + self.assertEqual( + xform_framework_basis("OTHER"), + "other_basis", + ) + self.assertEqual(xform_framework_basis("REGULATORY"), "regulatory_basis") + self.assertEqual(xform_framework_basis("TAX"), "tax_basis") + self.assertEqual( + xform_framework_basis("CONTRACTUAL"), + "contractual_basis", + ) + + def test_invalid_basis(self): + """Test that the function raises an exception when the basis is invalid.""" + with self.assertRaises(DataMigrationError): + xform_framework_basis("equity") + + def test_empty_string(self): + """Test that the function raises an exception when the basis is an empty string.""" + with self.assertRaises(DataMigrationError): + xform_framework_basis("") + + def test_none_input(self): + """Test that the function raises an exception when the basis is None.""" + with self.assertRaises(DataMigrationError): + xform_framework_basis(None) + + def test_basis_with_multiple_spaces(self): + """Test that the function returns the correct results when the basis has multiple spaces.""" + self.assertEqual(xform_framework_basis(" cash "), "cash_basis") + self.assertEqual( + xform_framework_basis(" other "), + "other_basis", + ) + with self.assertRaises(DataMigrationError): + xform_framework_basis(" unknown ") + + def test_basis_with_extra_words(self): + """Test that the function raises an exception when the basis has extra words.""" + with self.assertRaises(DataMigrationError): + xform_framework_basis("Something with cash basis") + + 
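The per-basis assertions in TestXformFrameworkBasis above could also be written with unittest's subTest so that each case reports independently; a sketch (the method is hypothetical, not part of the suite):

def test_valid_bases_with_subtests(self):
    cases = {"cash": "cash_basis", "tax": "tax_basis", "other": "other_basis"}
    for raw, expected in cases.items():
        with self.subTest(raw=raw):
            self.assertEqual(xform_framework_basis(raw), expected)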
+class TestXformSpFrameworkRequired(SimpleTestCase): + class MockAuditHeader: + def __init__( + self, + DBKEY, + TYPEREPORT_FS, + SP_FRAMEWORK_REQUIRED, + ): + self.DBKEY = DBKEY + self.TYPEREPORT_FS = TYPEREPORT_FS + self.SP_FRAMEWORK_REQUIRED = SP_FRAMEWORK_REQUIRED + + def _mock_audit_header(self): + """Returns a mock audit header with all necessary fields.""" + return self.MockAuditHeader( + DBKEY="123456789", + TYPEREPORT_FS="AS", + SP_FRAMEWORK_REQUIRED="Y", + ) + + def test_sp_framework_required_Y(self): + """Test that the function returns 'Y' for SP_FRAMEWORK_REQUIRED.""" + audit_header = self._mock_audit_header() + xform_sp_framework_required(audit_header) + self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, "Y") + + def test_sp_framework_required_N(self): + """Test that the function returns 'N' for SP_FRAMEWORK_REQUIRED.""" + audit_header = self._mock_audit_header() + audit_header.SP_FRAMEWORK_REQUIRED = "N" + xform_sp_framework_required(audit_header) + self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, "N") + + def test_sp_framework_required_blank(self): + """Test that the function returns GSA_MIGRATION for SP_FRAMEWORK_REQUIRED.""" + audit_header = self._mock_audit_header() + audit_header.SP_FRAMEWORK_REQUIRED = "" + xform_sp_framework_required(audit_header) + self.assertEqual(audit_header.SP_FRAMEWORK_REQUIRED, settings.GSA_MIGRATION) + + +class TestXformLowrisk(SimpleTestCase): + class MockAuditHeader: + def __init__( + self, + DBKEY, + LOWRISK, + ): + self.DBKEY = DBKEY + self.LOWRISK = LOWRISK + + def _mock_audit_header(self): + """Returns a mock audit header with all necessary fields.""" + return self.MockAuditHeader( + DBKEY="123456789", + LOWRISK="Y", + ) + + def test_lowrisk_Y(self): + """Test that the function returns 'Y' for LOWRISK.""" + audit_header = self._mock_audit_header() + xform_lowrisk(audit_header) + self.assertEqual(audit_header.LOWRISK, "Y") + + def test_lowrisk_N(self): + """Test that the function returns 'N' for LOWRISK.""" + audit_header = self._mock_audit_header() + audit_header.LOWRISK = "N" + xform_lowrisk(audit_header) + self.assertEqual(audit_header.LOWRISK, "N") + + def test_lowrisk_blank(self): + """Test that the function returns GSA_MIGRATION for LOWRISK.""" + audit_header = self._mock_audit_header() + audit_header.LOWRISK = "" + xform_lowrisk(audit_header) + self.assertEqual(audit_header.LOWRISK, settings.GSA_MIGRATION) + + +class TestAceAuditInformation(SimpleTestCase): + + def setUp(self): + self.audit_header = MagicMock() + self.audit_header.some_field = "test_value" + + self.default_values = { + "dollar_threshold": settings.GSA_MIGRATION_INT, + "gaap_results": [settings.GSA_MIGRATION], + "is_going_concern_included": settings.GSA_MIGRATION, + "is_internal_control_deficiency_disclosed": settings.GSA_MIGRATION, + "is_internal_control_material_weakness_disclosed": settings.GSA_MIGRATION, + "is_material_noncompliance_disclosed": settings.GSA_MIGRATION, + "is_aicpa_audit_guide_included": settings.GSA_MIGRATION, + "is_low_risk_auditee": settings.GSA_MIGRATION, + "agencies": [settings.GSA_MIGRATION], + } + + @patch( + "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" + ) + def test_ace_audit_information_with_actual_values(self, mock_create_json): + """Test that the function returns the correct values when all fields are present.""" + mock_create_json.return_value = { + "dollar_threshold": 1000, + "is_low_risk_auditee": True, + "new_field": "new_value", + } + + result = 
ace_audit_information(self.audit_header) + + expected_result = self.default_values.copy() + expected_result.update( + { + "dollar_threshold": 1000, + "is_low_risk_auditee": True, + "new_field": "new_value", + } + ) + + self.assertEqual(result, expected_result) + + @patch( + "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" + ) + def test_ace_audit_information_with_default_values(self, mock_create_json): + """Test that the function returns the correct values when all fields are missing.""" + mock_create_json.return_value = {} + + result = ace_audit_information(self.audit_header) + + expected_result = self.default_values + + self.assertEqual(result, expected_result) + + @patch( + "census_historical_migration.sac_general_lib.audit_information.create_json_from_db_object" + ) + def test_ace_audit_information_with_mixed_values(self, mock_create_json): + """Test that the function returns the correct values when some fields are missing.""" + mock_create_json.return_value = { + "dollar_threshold": None, + "is_low_risk_auditee": True, + } + + result = ace_audit_information(self.audit_header) + + expected_result = self.default_values.copy() + expected_result.update({"is_low_risk_auditee": True}) + + self.assertEqual(result, expected_result) diff --git a/backend/census_historical_migration/test_corrective_action_plan_xforms.py b/backend/census_historical_migration/test_corrective_action_plan_xforms.py index f62b71738f..eec684b552 100644 --- a/backend/census_historical_migration/test_corrective_action_plan_xforms.py +++ b/backend/census_historical_migration/test_corrective_action_plan_xforms.py @@ -1,96 +1,96 @@ -from django.conf import settings -from django.test import SimpleTestCase - -from .workbooklib.corrective_action_plan import ( - xform_add_placeholder_for_missing_action_planned_text, - xform_add_placeholder_for_missing_references, -) - - -class TestXformAddPlaceholderForMissingCapText(SimpleTestCase): - class Findings: - def __init__(self, refnum): - self.FINDINGREFNUMS = refnum - - class CapText: - def __init__(self, refnum, seqnum=None, text=None, chartstables=None): - self.FINDINGREFNUMS = refnum - self.SEQ_NUMBER = seqnum - self.TEXT = text - self.CHARTSTABLES = chartstables - - def test_placeholder_addition_for_missing_references(self): - # Setup specific test data - mock_findings = [ - self.Findings("ref1"), - self.Findings("ref2"), - ] - mock_findings_texts = [ - self.CapText("ref1", "1", "text1", "chart1"), - ] - - findings_texts = xform_add_placeholder_for_missing_references( - mock_findings, mock_findings_texts - ) - - self.assertEqual( - len(findings_texts), 2 - ) # Expecting two items in findings_texts - self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") - self.assertEqual(findings_texts[1].TEXT, settings.GSA_MIGRATION) - self.assertEqual(findings_texts[1].CHARTSTABLES, settings.GSA_MIGRATION) - - def test_no_placeholder_addition_when_all_references_present(self): - # Setup specific test data - mock_findings = [ - self.Findings("ref1"), - self.Findings("ref2"), - ] - mock_findings_texts = [ - self.CapText("ref1", "1", "text1", "chart1"), - self.CapText("ref2", "2", "text2", "chart2"), - ] - - findings_texts = xform_add_placeholder_for_missing_references( - mock_findings, mock_findings_texts - ) - - self.assertEqual( - len(findings_texts), 2 - ) # Expecting two items in findings_texts - self.assertEqual(findings_texts[0].FINDINGREFNUMS, "ref1") - self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") - - -class 
TestXformAddPlaceholderForMissingActionPlannedText(SimpleTestCase): - class CapText: - def __init__(self, FINDINGREFNUMS=None, TEXT=None): - self.FINDINGREFNUMS = FINDINGREFNUMS - self.TEXT = TEXT - - def test_add_placeholder_to_empty_text(self): - captexts = [self.CapText(FINDINGREFNUMS="123", TEXT="")] - expected_text = settings.GSA_MIGRATION - xform_add_placeholder_for_missing_action_planned_text(captexts) - self.assertEqual( - captexts[0].TEXT, - expected_text, - "The TEXT field should have the placeholder text.", - ) - - def test_no_placeholder_if_text_present(self): - captexts = [self.CapText(FINDINGREFNUMS="123", TEXT="Existing text")] - expected_text = "Existing text" - xform_add_placeholder_for_missing_action_planned_text(captexts) - self.assertEqual( - captexts[0].TEXT, expected_text, "The TEXT field should not be modified." - ) - - def test_empty_finding_refnums_no_change(self): - captexts = [self.CapText(FINDINGREFNUMS="", TEXT="")] - xform_add_placeholder_for_missing_action_planned_text(captexts) - self.assertEqual( - captexts[0].TEXT, - "", - "The TEXT field should remain empty if FINDINGREFNUMS is empty.", - ) +from django.conf import settings +from django.test import SimpleTestCase + +from .workbooklib.corrective_action_plan import ( + xform_add_placeholder_for_missing_action_planned_text, + xform_add_placeholder_for_missing_references, +) + + +class TestXformAddPlaceholderForMissingCapText(SimpleTestCase): + class Findings: + def __init__(self, refnum): + self.FINDINGREFNUMS = refnum + + class CapText: + def __init__(self, refnum, seqnum=None, text=None, chartstables=None): + self.FINDINGREFNUMS = refnum + self.SEQ_NUMBER = seqnum + self.TEXT = text + self.CHARTSTABLES = chartstables + + def test_placeholder_addition_for_missing_references(self): + # Setup specific test data + mock_findings = [ + self.Findings("ref1"), + self.Findings("ref2"), + ] + mock_findings_texts = [ + self.CapText("ref1", "1", "text1", "chart1"), + ] + + findings_texts = xform_add_placeholder_for_missing_references( + mock_findings, mock_findings_texts + ) + + self.assertEqual( + len(findings_texts), 2 + ) # Expecting two items in findings_texts + self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") + self.assertEqual(findings_texts[1].TEXT, settings.GSA_MIGRATION) + self.assertEqual(findings_texts[1].CHARTSTABLES, settings.GSA_MIGRATION) + + def test_no_placeholder_addition_when_all_references_present(self): + # Setup specific test data + mock_findings = [ + self.Findings("ref1"), + self.Findings("ref2"), + ] + mock_findings_texts = [ + self.CapText("ref1", "1", "text1", "chart1"), + self.CapText("ref2", "2", "text2", "chart2"), + ] + + findings_texts = xform_add_placeholder_for_missing_references( + mock_findings, mock_findings_texts + ) + + self.assertEqual( + len(findings_texts), 2 + ) # Expecting two items in findings_texts + self.assertEqual(findings_texts[0].FINDINGREFNUMS, "ref1") + self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") + + +class TestXformAddPlaceholderForMissingActionPlannedText(SimpleTestCase): + class CapText: + def __init__(self, FINDINGREFNUMS=None, TEXT=None): + self.FINDINGREFNUMS = FINDINGREFNUMS + self.TEXT = TEXT + + def test_add_placeholder_to_empty_text(self): + captexts = [self.CapText(FINDINGREFNUMS="123", TEXT="")] + expected_text = settings.GSA_MIGRATION + xform_add_placeholder_for_missing_action_planned_text(captexts) + self.assertEqual( + captexts[0].TEXT, + expected_text, + "The TEXT field should have the placeholder text.", + ) + + def 
test_no_placeholder_if_text_present(self): + captexts = [self.CapText(FINDINGREFNUMS="123", TEXT="Existing text")] + expected_text = "Existing text" + xform_add_placeholder_for_missing_action_planned_text(captexts) + self.assertEqual( + captexts[0].TEXT, expected_text, "The TEXT field should not be modified." + ) + + def test_empty_finding_refnums_no_change(self): + captexts = [self.CapText(FINDINGREFNUMS="", TEXT="")] + xform_add_placeholder_for_missing_action_planned_text(captexts) + self.assertEqual( + captexts[0].TEXT, + "", + "The TEXT field should remain empty if FINDINGREFNUMS is empty.", + ) diff --git a/backend/census_historical_migration/test_excel_creation.py b/backend/census_historical_migration/test_excel_creation.py index 8ceae113e9..713cb91ba8 100644 --- a/backend/census_historical_migration/test_excel_creation.py +++ b/backend/census_historical_migration/test_excel_creation.py @@ -1,234 +1,234 @@ -from .workbooklib.excel_creation_utils import ( - apply_conversion_function, - contains_illegal_characters, - sanitize_for_excel, - set_range, - sort_by_field, -) -from .exception_utils import DataMigrationValueError -from django.test import TestCase -from openpyxl import Workbook -from openpyxl.utils import quote_sheetname, absolute_coordinate -from openpyxl.workbook.defined_name import DefinedName - - -class ExcelCreationTests(TestCase): - range_name = "my_range" - - def init_named_range(self, coord): - """ - Create and return a workbook with a named range for the given coordinate - """ - wb = Workbook() - ws = wb.active - - # Create named range - ref = f"{quote_sheetname(ws.title)}!{absolute_coordinate(coord)}" - defn = DefinedName(self.range_name, attr_text=ref) - wb.defined_names.add(defn) - - return wb - - def test_set_range_standard(self): - """ - Standard use case - """ - wb = self.init_named_range("A6") - ws = wb.active - - ws.cell(row=6, column=1, value="foo") - self.assertEqual(ws["A6"].value, "foo") - - set_range(wb, self.range_name, ["bar"]) - self.assertEqual(ws["A6"].value, "bar") - - def test_set_range_default(self): - """ - Using a default value - """ - wb = self.init_named_range("A6") - ws = wb.active - - ws.cell(row=6, column=1, value="foo") - self.assertEqual(ws["A6"].value, "foo") - - set_range(wb, self.range_name, [None], default="bar") - self.assertEqual(ws["A6"].value, "bar") - - def test_set_range_no_default(self): - """ - Error when no value or default given - """ - wb = self.init_named_range("A6") - ws = wb.active - - ws.cell(row=6, column=1, value="foo") - self.assertEqual(ws["A6"].value, "foo") - - self.assertRaises( - DataMigrationValueError, set_range, wb, self.range_name, [None] - ) - - def test_set_range_conversion(self): - """ - Applies given conversion function - """ - wb = self.init_named_range("A6") - ws = wb.active - - ws.cell(row=6, column=1, value="foo") - self.assertEqual(ws["A6"].value, "foo") - - set_range(wb, self.range_name, ["1"], None, int) - self.assertEqual(ws["A6"].value, 1) # str -> int - - def test_set_range_multiple_values(self): - """ - Setting multiple values - """ - wb = self.init_named_range("A1:A2") - ws = wb.active - - ws.cell(row=1, column=1, value=0) - self.assertEqual(ws["A1"].value, 0) - - ws.cell(row=2, column=1, value=0) - self.assertEqual(ws["A2"].value, 0) - - set_range(wb, self.range_name, ["1", "2"]) - self.assertEqual(ws["A1"].value, "1") - self.assertEqual(ws["A2"].value, "2") - - def test_set_range_fewer_values(self): - """ - Number of values is less than the range size - """ - wb = 
self.init_named_range("A1:A2") - ws = wb.active - - ws.cell(row=1, column=1, value="foo") - self.assertEqual(ws["A1"].value, "foo") - - ws.cell(row=2, column=1, value="foo") - self.assertEqual(ws["A2"].value, "foo") - - set_range(wb, self.range_name, ["bar"]) - self.assertEqual(ws["A1"].value, "bar") # New value - self.assertEqual(ws["A2"].value, "foo") # Unchanged - - def test_set_range_multi_dests(self): - """ - Error when multiple destinations found - """ - wb = Workbook() - ws = wb.active - - # Create named range with multiple destinations - ref = f"{quote_sheetname(ws.title)}!{absolute_coordinate('A6')}, \ - {quote_sheetname(ws.title)}!{absolute_coordinate('B6')}" - defn = DefinedName(self.range_name, attr_text=ref) - wb.defined_names.add(defn) - - self.assertEqual(len(list(wb.defined_names[self.range_name].destinations)), 2) - self.assertRaises( - DataMigrationValueError, set_range, wb, self.range_name, ["bar"] - ) - - def test_set_range_ws_missing(self): - """ - Error when the named range isn't in the given WS - """ - wb = Workbook() - - # Create named range with bad sheet title - ref = f"{quote_sheetname('wrong name')}!{absolute_coordinate('A6')}" - defn = DefinedName(self.range_name, attr_text=ref) - wb.defined_names.add(defn) - - self.assertRaises(KeyError, set_range, wb, self.range_name, ["bar"]) - - -class TestApplyConversionFunction(TestCase): - def test_string_conversion(self): - """Test that a string is returned unchanged""" - self.assertEqual(apply_conversion_function("test", "default", str), "test") - - def test_int_conversion(self): - """Test that an int is properly returned""" - self.assertEqual(apply_conversion_function("123", "default", int), 123) - - def test_custom_conversion(self): - """Test that custom conversion function is applied when present""" - self.assertEqual( - apply_conversion_function("test", "default", lambda x: x.upper()), "TEST" - ) - - def test_default_value(self): - """Test that a default value is returned when the input is None""" - self.assertEqual(apply_conversion_function(None, "default", str), "default") - - def test_none_with_no_default(self): - """Test that an exception is raised when the input is None and no default is provided""" - with self.assertRaises(DataMigrationValueError): - apply_conversion_function(None, None, str) - - -class TestExcelSanitization(TestCase): - def test_contains_illegal_characters(self): - """Test that contains_illegal_characters returns expected boolean values""" - self.assertTrue( - contains_illegal_characters("Some\x01text\x02with\x03control\x04characters") - ) # Contains control characters - self.assertFalse( - contains_illegal_characters("Some text with no control characters") - ) # No control characters - self.assertFalse( - contains_illegal_characters("\nNew Line\n") - ) # Newline character is allowed - - def test_sanitize_for_excel(self): - """Test that sanitize_for_excel returns expected values""" - self.assertEqual( - sanitize_for_excel("Some\x01Text\x02With\x03Control\x04Characters"), - "SomeTextWithControlCharacters", - ) # Control character removed - self.assertEqual( - sanitize_for_excel("Some text with no control characters"), - "Some text with no control characters", - ) # No change needed - self.assertEqual( - sanitize_for_excel("\nNew Line\n"), "\nNew Line\n" - ) # Newline preserved - - -class TestSortRecordsByField(TestCase): - class MockRecord: - def __init__(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - def test_empty_list(self): - """Test sorting with an 
empty list.""" - self.assertEqual(sort_by_field([], "some_field"), []) - - def test_sorting(self): - """Test sorting with a non-empty list.""" - records = [ - self.MockRecord(seq_number="1"), - self.MockRecord(seq_number="10"), - self.MockRecord(seq_number="2"), - ] - sorted_records = sort_by_field(records, "seq_number") - self.assertEqual( - [record.seq_number for record in sorted_records], ["1", "2", "10"] - ) - - def test_sorting_with_empty_field(self): - """Test sorting with empty sort field.""" - records = [ - self.MockRecord(other="1", seq_number=""), - self.MockRecord(other="10", seq_number=""), - self.MockRecord(other="2", seq_number=""), - ] - sorted_records = sort_by_field(records, "seq_number") - self.assertEqual([record.other for record in sorted_records], ["1", "10", "2"]) +from .workbooklib.excel_creation_utils import ( + apply_conversion_function, + contains_illegal_characters, + sanitize_for_excel, + set_range, + sort_by_field, +) +from .exception_utils import DataMigrationValueError +from django.test import TestCase +from openpyxl import Workbook +from openpyxl.utils import quote_sheetname, absolute_coordinate +from openpyxl.workbook.defined_name import DefinedName + + +class ExcelCreationTests(TestCase): + range_name = "my_range" + + def init_named_range(self, coord): + """ + Create and return a workbook with a named range for the given coordinate + """ + wb = Workbook() + ws = wb.active + + # Create named range + ref = f"{quote_sheetname(ws.title)}!{absolute_coordinate(coord)}" + defn = DefinedName(self.range_name, attr_text=ref) + wb.defined_names.add(defn) + + return wb + + def test_set_range_standard(self): + """ + Standard use case + """ + wb = self.init_named_range("A6") + ws = wb.active + + ws.cell(row=6, column=1, value="foo") + self.assertEqual(ws["A6"].value, "foo") + + set_range(wb, self.range_name, ["bar"]) + self.assertEqual(ws["A6"].value, "bar") + + def test_set_range_default(self): + """ + Using a default value + """ + wb = self.init_named_range("A6") + ws = wb.active + + ws.cell(row=6, column=1, value="foo") + self.assertEqual(ws["A6"].value, "foo") + + set_range(wb, self.range_name, [None], default="bar") + self.assertEqual(ws["A6"].value, "bar") + + def test_set_range_no_default(self): + """ + Error when no value or default given + """ + wb = self.init_named_range("A6") + ws = wb.active + + ws.cell(row=6, column=1, value="foo") + self.assertEqual(ws["A6"].value, "foo") + + self.assertRaises( + DataMigrationValueError, set_range, wb, self.range_name, [None] + ) + + def test_set_range_conversion(self): + """ + Applies given conversion function + """ + wb = self.init_named_range("A6") + ws = wb.active + + ws.cell(row=6, column=1, value="foo") + self.assertEqual(ws["A6"].value, "foo") + + set_range(wb, self.range_name, ["1"], None, int) + self.assertEqual(ws["A6"].value, 1) # str -> int + + def test_set_range_multiple_values(self): + """ + Setting multiple values + """ + wb = self.init_named_range("A1:A2") + ws = wb.active + + ws.cell(row=1, column=1, value=0) + self.assertEqual(ws["A1"].value, 0) + + ws.cell(row=2, column=1, value=0) + self.assertEqual(ws["A2"].value, 0) + + set_range(wb, self.range_name, ["1", "2"]) + self.assertEqual(ws["A1"].value, "1") + self.assertEqual(ws["A2"].value, "2") + + def test_set_range_fewer_values(self): + """ + Number of values is less than the range size + """ + wb = self.init_named_range("A1:A2") + ws = wb.active + + ws.cell(row=1, column=1, value="foo") + self.assertEqual(ws["A1"].value, "foo") + + ws.cell(row=2, 
column=1, value="foo") + self.assertEqual(ws["A2"].value, "foo") + + set_range(wb, self.range_name, ["bar"]) + self.assertEqual(ws["A1"].value, "bar") # New value + self.assertEqual(ws["A2"].value, "foo") # Unchanged + + def test_set_range_multi_dests(self): + """ + Error when multiple destinations found + """ + wb = Workbook() + ws = wb.active + + # Create named range with multiple destinations + ref = f"{quote_sheetname(ws.title)}!{absolute_coordinate('A6')}, \ + {quote_sheetname(ws.title)}!{absolute_coordinate('B6')}" + defn = DefinedName(self.range_name, attr_text=ref) + wb.defined_names.add(defn) + + self.assertEqual(len(list(wb.defined_names[self.range_name].destinations)), 2) + self.assertRaises( + DataMigrationValueError, set_range, wb, self.range_name, ["bar"] + ) + + def test_set_range_ws_missing(self): + """ + Error when the named range isn't in the given WS + """ + wb = Workbook() + + # Create named range with bad sheet title + ref = f"{quote_sheetname('wrong name')}!{absolute_coordinate('A6')}" + defn = DefinedName(self.range_name, attr_text=ref) + wb.defined_names.add(defn) + + self.assertRaises(KeyError, set_range, wb, self.range_name, ["bar"]) + + +class TestApplyConversionFunction(TestCase): + def test_string_conversion(self): + """Test that a string is returned unchanged""" + self.assertEqual(apply_conversion_function("test", "default", str), "test") + + def test_int_conversion(self): + """Test that an int is properly returned""" + self.assertEqual(apply_conversion_function("123", "default", int), 123) + + def test_custom_conversion(self): + """Test that custom conversion function is applied when present""" + self.assertEqual( + apply_conversion_function("test", "default", lambda x: x.upper()), "TEST" + ) + + def test_default_value(self): + """Test that a default value is returned when the input is None""" + self.assertEqual(apply_conversion_function(None, "default", str), "default") + + def test_none_with_no_default(self): + """Test that an exception is raised when the input is None and no default is provided""" + with self.assertRaises(DataMigrationValueError): + apply_conversion_function(None, None, str) + + +class TestExcelSanitization(TestCase): + def test_contains_illegal_characters(self): + """Test that contains_illegal_characters returns expected boolean values""" + self.assertTrue( + contains_illegal_characters("Some\x01text\x02with\x03control\x04characters") + ) # Contains control characters + self.assertFalse( + contains_illegal_characters("Some text with no control characters") + ) # No control characters + self.assertFalse( + contains_illegal_characters("\nNew Line\n") + ) # Newline character is allowed + + def test_sanitize_for_excel(self): + """Test that sanitize_for_excel returns expected values""" + self.assertEqual( + sanitize_for_excel("Some\x01Text\x02With\x03Control\x04Characters"), + "SomeTextWithControlCharacters", + ) # Control character removed + self.assertEqual( + sanitize_for_excel("Some text with no control characters"), + "Some text with no control characters", + ) # No change needed + self.assertEqual( + sanitize_for_excel("\nNew Line\n"), "\nNew Line\n" + ) # Newline preserved + + +class TestSortRecordsByField(TestCase): + class MockRecord: + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + + def test_empty_list(self): + """Test sorting with an empty list.""" + self.assertEqual(sort_by_field([], "some_field"), []) + + def test_sorting(self): + """Test sorting with a non-empty list.""" + records 
= [ + self.MockRecord(seq_number="1"), + self.MockRecord(seq_number="10"), + self.MockRecord(seq_number="2"), + ] + sorted_records = sort_by_field(records, "seq_number") + self.assertEqual( + [record.seq_number for record in sorted_records], ["1", "2", "10"] + ) + + def test_sorting_with_empty_field(self): + """Test sorting with empty sort field.""" + records = [ + self.MockRecord(other="1", seq_number=""), + self.MockRecord(other="10", seq_number=""), + self.MockRecord(other="2", seq_number=""), + ] + sorted_records = sort_by_field(records, "seq_number") + self.assertEqual([record.other for record in sorted_records], ["1", "10", "2"]) diff --git a/backend/census_historical_migration/test_federal_awards_xforms.py b/backend/census_historical_migration/test_federal_awards_xforms.py index 5cabd2e881..6282b231f9 100644 --- a/backend/census_historical_migration/test_federal_awards_xforms.py +++ b/backend/census_historical_migration/test_federal_awards_xforms.py @@ -1,1000 +1,1000 @@ -from unittest.mock import patch -from django.conf import settings -from django.test import SimpleTestCase, TestCase - -from .invalid_migration_tags import INVALID_MIGRATION_TAGS -from .invalid_record import InvalidRecord -from .transforms.xform_string_to_string import ( - string_to_string, -) -from .exception_utils import DataMigrationError -from .workbooklib.federal_awards import ( - is_valid_prefix, - track_invalid_federal_program_total, - track_invalid_number_of_audit_findings, - xform_match_number_passthrough_names_ids, - xform_missing_amount_expended, - xform_missing_program_total, - xform_missing_cluster_total_v2, - xform_populate_default_award_identification_values, - xform_populate_default_loan_balance, - xform_constructs_cluster_names, - xform_populate_default_passthrough_amount, - xform_populate_default_passthrough_names_ids, - xform_replace_invalid_direct_award_flag, - xform_replace_invalid_extension, - xform_program_name, - xform_is_passthrough_award, - xform_missing_major_program, - is_valid_extension, - xform_cluster_names, - xform_replace_missing_prefix, - xform_replace_required_values_with_gsa_migration_when_empty, - xform_sanitize_additional_award_identification, -) - - -class TestXformConstructsClusterNames(SimpleTestCase): - class MockAudit: - def __init__(self, cluster_name, state_cluster_name, other_cluster_name): - self.CLUSTERNAME = cluster_name - self.STATECLUSTERNAME = state_cluster_name - self.OTHERCLUSTERNAME = other_cluster_name - - def test_empty_audits_list(self): - """Test the function with an empty audits list.""" - result = xform_constructs_cluster_names([]) - self.assertEqual(result, ([], [], [])) - - def test_empty_cluster_names(self): - """Test the function with empty cluster names.""" - audits = [self.MockAudit("", "", "")] - result = xform_constructs_cluster_names(audits) - self.assertEqual(result, ([settings.GSA_MIGRATION], [""], [""])) - - audits.append(self.MockAudit("", "Some State Cluster", "")) - result = xform_constructs_cluster_names(audits) - result = tuple(sorted(sublist) for sublist in result) - expected = ( - [settings.GSA_MIGRATION, settings.GSA_MIGRATION], - ["", ""], - ["", "Some State Cluster"], - ) - expected = tuple(sorted(sublist) for sublist in expected) - self.assertEqual(result, expected) - - audits.append(self.MockAudit("", "", "Some Other Cluster")) - result = xform_constructs_cluster_names(audits) - result = tuple(sorted(sublist) for sublist in result) - expected = ( - [settings.GSA_MIGRATION, settings.GSA_MIGRATION, settings.GSA_MIGRATION], - ["", "", 
"Some Other Cluster"], - ["", "Some State Cluster", ""], - ) - expected = tuple(sorted(sublist) for sublist in expected) - self.assertEqual( - result, - expected, - ) - - def test_valid_cluster_name_combinations(self): - """Test the function with valid cluster name combinations.""" - audits = [self.MockAudit("Normal Cluster", "", "")] - result = xform_constructs_cluster_names(audits) - self.assertEqual(result, (["Normal Cluster"], [""], [""])) - - audits.append(self.MockAudit("STATE CLUSTER", "Some State Cluster", "")) - result = xform_constructs_cluster_names(audits) - result = tuple(sorted(sublist) for sublist in result) - expected = ( - ["Normal Cluster", "STATE CLUSTER"], - ["", ""], - ["", "Some State Cluster"], - ) - expected = tuple(sorted(sublist) for sublist in expected) - self.assertEqual( - result, - expected, - ) - - audits.append( - self.MockAudit("OTHER CLUSTER NOT LISTED ABOVE", "", "Some Other Cluster") - ) - result = xform_constructs_cluster_names(audits) - result = tuple(sorted(sublist) for sublist in result) - expected = ( - ["Normal Cluster", "STATE CLUSTER", "OTHER CLUSTER NOT LISTED ABOVE"], - ["", "", "Some Other Cluster"], - ["", "Some State Cluster", ""], - ) - expected = tuple(sorted(sublist) for sublist in expected) - self.assertEqual( - result, - expected, - ) - - def test_data_migration_error(self): - """Test the function raises DataMigrationError when cluster name combination is invalid.""" - audits = [ - self.MockAudit("Cluster Name", "State Cluster Name", "Other Cluster Name") - ] - with self.assertRaises(DataMigrationError): - xform_constructs_cluster_names(audits) - - audits = [self.MockAudit("Cluster Name", "State Cluster Name", "")] - with self.assertRaises(DataMigrationError): - xform_constructs_cluster_names(audits) - - audits = [self.MockAudit("Cluster Name", "", "Other Cluster Name")] - with self.assertRaises(DataMigrationError): - xform_constructs_cluster_names(audits) - - -class TestXformPopulateDefaultLoanBalance(SimpleTestCase): - class MockAudit: - def __init__(self, LOANS, LOANBALANCE): - self.LOANS = LOANS - self.LOANBALANCE = LOANBALANCE - - def test_loan_with_no_balance(self): - """Test the function with a loan with no balance.""" - audits = [self.MockAudit(LOANS="Y", LOANBALANCE="")] - expected = [settings.GSA_MIGRATION] - self.assertEqual(xform_populate_default_loan_balance(audits), expected) - - def test_loan_with_balance(self): - """Test the function with a loan with a balance.""" - audits = [self.MockAudit(LOANS="Y", LOANBALANCE="100")] - expected = ["100"] - self.assertEqual(xform_populate_default_loan_balance(audits), expected) - - def test_no_loan(self): - """Test the function with no loan.""" - audits = [self.MockAudit(LOANS="N", LOANBALANCE="")] - expected = [""] - self.assertEqual(xform_populate_default_loan_balance(audits), expected) - - def test_no_loan_with_zero_balance(self): - """Test the function with no loan and zero balance.""" - audits = [self.MockAudit(LOANS="N", LOANBALANCE="0")] - expected = [""] - self.assertEqual(xform_populate_default_loan_balance(audits), expected) - - def test_unexpected_loan_balance(self): - """Test the function raises DataMigrationError when loan balance is unexpected.""" - audits = [self.MockAudit(LOANS="N", LOANBALANCE="100")] - with self.assertRaises(DataMigrationError): - xform_populate_default_loan_balance(audits) - - -class TestXformPopulateDefaultAwardIdentificationValues(SimpleTestCase): - class MockAudit: - def __init__(self, CFDA, AWARDIDENTIFICATION): - self.CFDA = CFDA - 
self.AWARDIDENTIFICATION = AWARDIDENTIFICATION - - def test_cfda_with_u_no_identification(self): - """Test the function with a CFDA with a U and no identification.""" - audits = [self.MockAudit(CFDA="123U", AWARDIDENTIFICATION="")] - expected = [settings.GSA_MIGRATION] - self.assertEqual( - xform_populate_default_award_identification_values(audits), expected - ) - - def test_cfda_with_rd_no_identification(self): - """Test the function with a CFDA with a RD and no identification.""" - audits = [self.MockAudit(CFDA="rd123", AWARDIDENTIFICATION="")] - expected = [settings.GSA_MIGRATION] - self.assertEqual( - xform_populate_default_award_identification_values(audits), expected - ) - - def test_cfda_without_u_or_rd(self): - """Test the function with a CFDA without a U or RD.""" - audits = [self.MockAudit(CFDA="12345", AWARDIDENTIFICATION="")] - expected = [""] - self.assertEqual( - xform_populate_default_award_identification_values(audits), expected - ) - - def test_cfda_with_identification(self): - """Test the function with a CFDA with an identification.""" - audits = [self.MockAudit(CFDA="U123", AWARDIDENTIFICATION="ABC")] - expected = ["ABC"] - self.assertEqual( - xform_populate_default_award_identification_values(audits), expected - ) - - -class TestXformPopulateDefaultPassthroughValues(SimpleTestCase): - class MockAudit: - def __init__(self, DIRECT, DBKEY, AUDITYEAR, ELECAUDITSID): - self.DIRECT = DIRECT - self.DBKEY = DBKEY - self.AUDITYEAR = AUDITYEAR - self.ELECAUDITSID = ELECAUDITSID - - class MockPassthrough: - def __init__(self, PASSTHROUGHNAME, PASSTHROUGHID): - self.PASSTHROUGHNAME = PASSTHROUGHNAME - self.PASSTHROUGHID = PASSTHROUGHID - - @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") - def test_direct_N_empty_name_and_id(self, mock_get_passthroughs): - """Test the function with a direct N audit with empty name and id.""" - mock_get_passthroughs.return_value = ([""], [""]) - audits = [ - self.MockAudit( - DIRECT="N", DBKEY="DBKEY2", AUDITYEAR="2022", ELECAUDITSID="1" - ) - ] - - names, ids = xform_populate_default_passthrough_names_ids(audits) - - self.assertEqual(names[0], settings.GSA_MIGRATION) - self.assertEqual(ids[0], settings.GSA_MIGRATION) - - @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") - def test_direct_N_non_empty_name_and_id(self, mock_get_passthroughs): - """Test the function with a direct N audit with non-empty name and id.""" - mock_get_passthroughs.return_value = (["Name1|Name2"], ["ID1|ID2"]) - audits = [ - self.MockAudit( - DIRECT="N", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="2" - ) - ] - - names, ids = xform_populate_default_passthrough_names_ids(audits) - - self.assertEqual(names[0], "Name1|Name2") - self.assertEqual(ids[0], "ID1|ID2") - - @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") - def test_direct_Y_empty_name_and_id(self, mock_get_passthroughs): - """Test the function with a direct Y audit with empty name and id.""" - mock_get_passthroughs.return_value = ([""], [""]) - audits = [ - self.MockAudit( - DIRECT="Y", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="3" - ) - ] - - names, ids = xform_populate_default_passthrough_names_ids(audits) - - self.assertEqual(names[0], "") - self.assertEqual(ids[0], "") - - @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") - def test_direct_Y_non_empty_name_and_id(self, mock_get_passthroughs): - """Test the function with a direct Y audit with non-empty name and id.""" - 
mock_get_passthroughs.return_value = (["Name"], ["ID"]) - audits = [ - self.MockAudit( - DIRECT="Y", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="4" - ) - ] - - names, ids = xform_populate_default_passthrough_names_ids(audits) - - self.assertEqual(names[0], "Name") - self.assertEqual(ids[0], "ID") - - -class TestXformPopulateDefaultPassthroughAmount(SimpleTestCase): - class MockAudit: - def __init__(self, PASSTHROUGHAWARD, PASSTHROUGHAMOUNT): - self.PASSTHROUGHAWARD = PASSTHROUGHAWARD - self.PASSTHROUGHAMOUNT = PASSTHROUGHAMOUNT - - def test_passthrough_award_Y_non_empty_amount(self): - """Test the function with a passthrough award Y audit with non-empty amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="Y", PASSTHROUGHAMOUNT="100")] - expected = ["100"] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_N_empty_amount(self): - """Test the function with a passthrough award N audit with empty amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="")] - expected = [""] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_N_zero_amount(self): - """Test the function with a passthrough award N audit with zero amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="0")] - expected = [""] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_N_unexpected_amount(self): - """Test the function raises DataMigrationError when passthrough award N and amount is unexpected.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="100")] - with self.assertRaises(DataMigrationError): - xform_populate_default_passthrough_amount(audits) - - def test_passthrough_award_Y_empty_amount(self): - """Test for default value when passthrough award Y audit with empty amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="Y", PASSTHROUGHAMOUNT="")] - expected = [str(settings.GSA_MIGRATION_INT)] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_gsa_non_empty_amount(self): - """Test the function with a passthrough award GSA_MIGRATION audit with non-empty amount.""" - audits = [ - self.MockAudit( - PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="100" - ) - ] - expected = ["100"] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_gsa_empty_amount(self): - """Test the function with a passthrough award GSA_MIGRATIONN audit with empty amount.""" - audits = [ - self.MockAudit( - PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="" - ) - ] - expected = [""] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - def test_passthrough_award_gsa_zero_amount(self): - """Test the function with a passthrough award GSA_MIGRATION audit with zero amount.""" - audits = [ - self.MockAudit( - PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="0" - ) - ] - expected = [""] - self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) - - -class TestXformIsPassthroughAward(SimpleTestCase): - class MockAudit: - def __init__(self, PASSTHROUGHAWARD, PASSTHROUGHAMOUNT): - self.PASSTHROUGHAWARD = PASSTHROUGHAWARD - self.PASSTHROUGHAMOUNT = PASSTHROUGHAMOUNT - - def test_passthrough_award_Y(self): - """Test the function with a valid passthrough award.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="Y", 
PASSTHROUGHAMOUNT="0")] - expected = "Y" - xform_is_passthrough_award(audits) - self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) - - def test_passthrough_award_empty_with_amount(self): - """Test the function with an empty passthrough award and truthy amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="42")] - expected = settings.GSA_MIGRATION - xform_is_passthrough_award(audits) - self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) - - def test_passthrough_award_empty_with_no_amount(self): - """Test the function with an empty passthrough award and 0 amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="0")] - expected = settings.GSA_MIGRATION - xform_is_passthrough_award(audits) - self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) - - def test_passthrough_award_empty_with_empty_amount(self): - """Test the function with an empty passthrough award and empty amount.""" - audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="")] - expected = settings.GSA_MIGRATION - xform_is_passthrough_award(audits) - self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) - - -class TestCFDAFunctions(SimpleTestCase): - def test_is_valid_prefix(self): - """Test the function with valid and invalid prefixes.""" - self.assertTrue(is_valid_prefix("01")) - self.assertFalse(is_valid_prefix("ABC")) - self.assertFalse(is_valid_prefix("123")) - - def test_is_valid_extension(self): - """Test the function with valid and invalid extensions.""" - self.assertTrue(is_valid_extension("RD")) - self.assertTrue(is_valid_extension("RD1")) - self.assertTrue(is_valid_extension("123A")) - self.assertTrue(is_valid_extension("U01")) - # Invalid cases - self.assertFalse(is_valid_extension("RDABC")) - self.assertFalse(is_valid_extension("12345")) - self.assertFalse(is_valid_extension("UA123")) - - def test_xform_replace_invalid_extension(self): - class MockAudit: - def __init__(self, prefix, ext): - self.CFDA_PREFIX = prefix - self.CFDA_EXT = ext - - # Valid prefix and extension - audit = MockAudit("01", "RD1") - result, _ = xform_replace_invalid_extension(audit) - self.assertEqual(result, "01.RD1") - - # Invalid prefix - audit = MockAudit("ABC", "RD1") - with self.assertRaises(DataMigrationError): - xform_replace_invalid_extension(audit) - - # Invalid extension - audit = MockAudit("01", "ABC") - result, _ = xform_replace_invalid_extension(audit) - self.assertEqual(result, f"01.{settings.GSA_MIGRATION}") - - -class TestXformMissingProgramTotal(SimpleTestCase): - class AuditMock: - def __init__(self, cfda_prefix, cfda_ext, amount, program_total=None): - self.CFDA_PREFIX = cfda_prefix - self.CFDA_EXT = cfda_ext - self.AMOUNT = amount - self.PROGRAMTOTAL = program_total - - def test_with_missing_program_total(self): - """Test for missing program total""" - audits = [ - self.AuditMock("10", "123", "100"), - self.AuditMock("10", "123", "200"), - self.AuditMock("20", "456", "300", "300"), - ] - expected_program_totals = {"10.123": "300", "20.456": "300"} - - xform_missing_program_total(audits) - - for audit in audits: - cfda_key = f"{string_to_string(audit.CFDA_PREFIX)}.{string_to_string(audit.CFDA_EXT)}" - self.assertEqual(audit.PROGRAMTOTAL, expected_program_totals[cfda_key]) - - def test_preserve_existing_program_total(self): - """Test for non missing program total""" - audits = [ - self.AuditMock("10", "123", "100", "150"), - self.AuditMock("10", "123", "200", "350"), - ] - # In this case, we expect the original PROGRAMTOTAL values to be preserved - 
expected_program_totals = ["150", "350"] - - xform_missing_program_total(audits) - - for audit, expected in zip(audits, expected_program_totals): - self.assertEqual(audit.PROGRAMTOTAL, expected) - - -class TestXformMissingClusterTotal(SimpleTestCase): - class AuditMock: - def __init__( - self, - amount, - cluster_name, - state_cluster_name="", - other_cluster_name="", - cluster_total="", - ): - self.AMOUNT = amount - self.CLUSTERTOTAL = cluster_total - self.CLUSTERNAME = cluster_name - self.STATECLUSTERNAME = state_cluster_name - self.OTHERCLUSTERNAME = other_cluster_name - - def test_xform_missing_cluster_total(self): - """Test for missing cluster total""" - audits = [ - self.AuditMock("100", "Cluster A"), - self.AuditMock("150", "Cluster B"), - self.AuditMock("150", "Cluster A", cluster_total="250"), - ] - cluster_names = ["Cluster A", "Cluster B", "Cluster A"] - other_cluster_names = ["", "", ""] - state_cluster_names = ["", "", ""] - expected_totals = ["250", "150", "250"] - - xform_missing_cluster_total_v2( - audits, cluster_names, other_cluster_names, state_cluster_names - ) - - for audit, expected in zip(audits, expected_totals): - self.assertEqual(str(audit.CLUSTERTOTAL), expected) - - def test_xform_missing_cluster_total_with_state_cluster(self): - """Test for missing state cluster total""" - audits = [ - self.AuditMock("100", "Cluster A"), - self.AuditMock( - "150", - settings.STATE_CLUSTER, - state_cluster_name="State Cluster A", - cluster_total="300", - ), - self.AuditMock("150", "Cluster A", cluster_total="250"), - self.AuditMock( - "150", settings.STATE_CLUSTER, state_cluster_name="State Cluster A" - ), - ] - cluster_names = [ - "Cluster A", - settings.STATE_CLUSTER, - "Cluster A", - settings.STATE_CLUSTER, - ] - other_cluster_names = ["", "", "", ""] - state_cluster_names = ["", "State Cluster A", "", "State Cluster A"] - expected_totals = ["250", "300", "250", "300"] - - xform_missing_cluster_total_v2( - audits, cluster_names, other_cluster_names, state_cluster_names - ) - - for audit, expected in zip(audits, expected_totals): - self.assertEqual(str(audit.CLUSTERTOTAL), expected) - - def test_xform_missing_cluster_total_with_other_cluster(self): - """Test for missing other cluster total""" - audits = [ - self.AuditMock("100", "Cluster A"), - self.AuditMock( - "150", - settings.OTHER_CLUSTER, - other_cluster_name="Other Cluster A", - cluster_total="300", - ), - self.AuditMock("150", "Cluster A", cluster_total="250"), - self.AuditMock( - "150", settings.OTHER_CLUSTER, other_cluster_name="Other Cluster A" - ), - ] - cluster_names = [ - "Cluster A", - settings.OTHER_CLUSTER, - "Cluster A", - settings.OTHER_CLUSTER, - ] - state_cluster_names = ["", "", "", ""] - other_cluster_names = ["", "Other Cluster A", "", "Other Cluster A"] - expected_totals = ["250", "300", "250", "300"] - - xform_missing_cluster_total_v2( - audits, cluster_names, other_cluster_names, state_cluster_names - ) - - for audit, expected in zip(audits, expected_totals): - self.assertEqual(str(audit.CLUSTERTOTAL), expected) - - def test_xform_missing_cluster_total_invalid(self): - """Test for incorrect cluster total""" - audits = [ - self.AuditMock("150", "Cluster A", cluster_total="33"), - ] - cluster_names = ["Cluster A"] - other_cluster_names = [""] - state_cluster_names = [""] - expected_totals = ["33"] - - InvalidRecord.reset() - xform_missing_cluster_total_v2( - audits, cluster_names, other_cluster_names, state_cluster_names - ) - - for audit, expected in zip(audits, expected_totals): - 
self.assertEqual(str(audit.CLUSTERTOTAL), expected) - - self.assertEqual( - InvalidRecord.fields["validations_to_skip"], - ["cluster_total_is_correct"], - ) - self.assertEqual( - InvalidRecord.fields["federal_award"], - [ - [ - { - "census_data": [ - {"column": "CLUSTERTOTAL", "value": 33}, - {"column": "CLUSTERNAME", "value": "Cluster A"}, - {"column": "STATECLUSTERNAME", "value": ""}, - {"column": "OTHERCLUSTERNAME", "value": ""}, - ], - "gsa_fac_data": {"field": "cluster_total", "value": 150}, - } - ] - ], - ) - - -class TestXformMissingAmountExpended(SimpleTestCase): - class AuditMock: - def __init__( - self, - amount, - ): - self.AMOUNT = amount - - def setUp(self): - self.audits = [ - self.AuditMock("100"), # Normal case - self.AuditMock(""), # Empty string should default to '0' - self.AuditMock(None), # None should default to '0' - ] - - def test_xform_missing_amount_expended(self): - """Test for missing amount expended""" - - xform_missing_amount_expended(self.audits) - - expected_results = ["100", "0", "0"] - actual_results = [audit.AMOUNT for audit in self.audits] - - self.assertEqual(actual_results, expected_results) - - -class TestXformMatchNumberPassthroughNamesIds(SimpleTestCase): - def test_match_numbers_all_empty(self): - """Test the function with all empty names and ids.""" - names = ["", "", ""] - ids = ["", "", ""] - expected_ids = ["", "", ""] - - transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( - names, ids - ) - - self.assertEqual(transformed_names, names) - self.assertEqual(transformed_ids, expected_ids) - - def test_match_numbers_non_empty_names_empty_ids(self): - """Test the function with non-empty names and empty ids.""" - names = ["name1|name2", "name3|name4"] - ids = ["", ""] - expected_ids = [ - f"{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", - f"{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", - ] - - transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( - names, ids - ) - - self.assertEqual(transformed_names, names) - self.assertEqual(transformed_ids, expected_ids) - - def test_match_numbers_mixed_empty_and_non_empty(self): - """Test the function with mixed empty and non-empty names and ids.""" - names = ["name1|name2|name3", "name4", "", "name5", ""] - ids = ["id1", "", "id2", "id3|id4", ""] - expected_ids = [ - f"id1|{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", - f"{settings.GSA_MIGRATION}", - "id2", - "id3|id4", - "", - ] - - expected_names = [ - "name1|name2|name3", - "name4", - f"{settings.GSA_MIGRATION}", - f"name5|{settings.GSA_MIGRATION}", - "", - ] - - transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( - names, ids - ) - - self.assertEqual(transformed_names, expected_names) - self.assertEqual(transformed_ids, expected_ids) - - -class TestXformMissingMajorProgram(SimpleTestCase): - class AuditMock: - def __init__( - self, - major_program, - audit_type, - ): - self.MAJORPROGRAM = major_program - self.TYPEREPORT_MP = audit_type - - def test_xform_normal_major_program(self): - """Test for normal major program""" - audits = [self.AuditMock("Y", "U")] - - xform_missing_major_program(audits) - - self.assertEqual(audits[0].MAJORPROGRAM, "Y") - - def test_xform_missing_major_program_with_audit_type(self): - """Test for missing major program with audit type provided""" - audits = [self.AuditMock("", "U")] - - xform_missing_major_program(audits) - - self.assertEqual(audits[0].MAJORPROGRAM, "Y") - - def test_xform_missing_major_program_without_audit_type(self): - 
"""Test for missing major program without audit type provided""" - audits = [self.AuditMock("", "")] - - xform_missing_major_program(audits) - - self.assertEqual(audits[0].MAJORPROGRAM, "N") - - -class TestXformMissingProgramName(SimpleTestCase): - class AuditMock: - def __init__(self, program_name): - self.FEDERALPROGRAMNAME = program_name - - def test_with_normal_program_name(self): - """Test for missing program name""" - audits = [self.AuditMock("Some fake name")] - - xform_program_name(audits) - - self.assertEqual(audits[0].FEDERALPROGRAMNAME, "Some fake name") - - def test_with_missing_program_name(self): - """Test for missing program name""" - audits = [self.AuditMock("")] - - xform_program_name(audits) - - self.assertEqual(audits[0].FEDERALPROGRAMNAME, settings.GSA_MIGRATION) - - -class TestXformClusterNames(SimpleTestCase): - class MockAudit: - def __init__(self, cluster_name): - self.CLUSTERNAME = cluster_name - - def test_cluster_name_not_other_cluster(self): - audits = [] - audits.append(self.MockAudit("STUDENT FINANCIAL ASSISTANCE")) - audits.append(self.MockAudit("RESEARCH AND DEVELOPMENT")) - result = xform_cluster_names(audits) - for index in range(len(result)): - self.assertEqual(result[index].CLUSTERNAME, audits[index].CLUSTERNAME) - - def test_cluster_name_other_cluster(self): - audits = [] - audits.append(self.MockAudit("OTHER CLUSTER")) - audits.append(self.MockAudit("OTHER CLUSTER")) - result = xform_cluster_names(audits) - for audit in result: - self.assertEqual(audit.CLUSTERNAME, settings.OTHER_CLUSTER) - - -class TestXformReplaceInvalidDirectAwardFlag(SimpleTestCase): - class MockAudit: - def __init__(self, direct_flag): - self.DIRECT = direct_flag - - def test_replace_invalid_direct_award_flag(self): - audits = [self.MockAudit("Y"), self.MockAudit("N"), self.MockAudit("Y")] - passthrough_names = [ - "some name", - "some other name", - "", - ] - - results = xform_replace_invalid_direct_award_flag(audits, passthrough_names) - # Expect first audit DIRECT flag to be replaced with default - self.assertEqual(results[0], settings.GSA_MIGRATION) - # Expect second audit DIRECT flag to remain unchanged - self.assertEqual(results[1], "N") - # Expect third audit DIRECT flag to remain unchanged - self.assertEqual(results[2], "Y") - - -class TestXformSanitizeAdditionalAwardIdentification(SimpleTestCase): - class MockAudit: - def __init__(self, value): - self.AWARDIDENTIFICATION = value - - def test_sanitize_valid(self): - audits = [self.MockAudit('=""12345"'), self.MockAudit("text")] - identifications = ['=""12345"', "text"] - expected = ["12345", "text"] - result = xform_sanitize_additional_award_identification(audits, identifications) - self.assertEqual(result, expected) - - def test_empty_and_none_identifications(self): - audits = [self.MockAudit(""), self.MockAudit(None), self.MockAudit('=""12345"')] - identifications = ["", None, '=""12345"'] - expected = ["", None, "12345"] - result = xform_sanitize_additional_award_identification(audits, identifications) - self.assertEqual(result, expected) - - -class TestTrackInvalidFederalProgramTotal(SimpleTestCase): - class MockAudit: - def __init__(self, PROGRAMTOTAL, AMOUNT): - self.PROGRAMTOTAL = PROGRAMTOTAL - self.AMOUNT = AMOUNT - - def setUp(self): - self.audits = [ - self.MockAudit(PROGRAMTOTAL="100", AMOUNT="50"), - self.MockAudit(PROGRAMTOTAL="200", AMOUNT="200"), - self.MockAudit(PROGRAMTOTAL="150", AMOUNT="100"), - ] - self.cfda_key_values = ["1234", "5678", "1234"] - - # Reset InvalidRecord before each test - 
InvalidRecord.reset() - - def test_track_invalid_federal_program_total(self): - """Test for invalid federal program total""" - - track_invalid_federal_program_total(self.audits, self.cfda_key_values) - - # Check the results in InvalidRecord - self.assertEqual(len(InvalidRecord.fields["federal_award"]), 1) - self.assertIn( - "federal_program_total_is_correct", - InvalidRecord.fields["validations_to_skip"], - ) - self.assertIn( - INVALID_MIGRATION_TAGS.INVALID_FEDERAL_PROGRAM_TOTAL, - InvalidRecord.fields["invalid_migration_tag"], - ) - - def test_no_invalid_federal_program_total(self): - # Adjust mock data for no invalid records - self.audits = [ - self.MockAudit(PROGRAMTOTAL="150", AMOUNT="50"), - self.MockAudit(PROGRAMTOTAL="200", AMOUNT="200"), - self.MockAudit(PROGRAMTOTAL="150", AMOUNT="100"), - ] - - track_invalid_federal_program_total(self.audits, self.cfda_key_values) - - # Check that no invalid records are added - self.assertEqual(len(InvalidRecord.fields["federal_award"]), 0) - self.assertNotIn( - "federal_program_total_is_correct", - InvalidRecord.fields["validations_to_skip"], - ) - self.assertNotIn( - INVALID_MIGRATION_TAGS.INVALID_FEDERAL_PROGRAM_TOTAL, - InvalidRecord.fields["invalid_migration_tag"], - ) - - -class TestTrackInvalidNumberOfAuditFindings(TestCase): - - class MockAudit: - def __init__(self, elec_audits_id, findings_count): - self.ELECAUDITSID = elec_audits_id - self.FINDINGSCOUNT = findings_count - - class MockFinding: - def __init__(self, elec_audits_id): - self.ELECAUDITSID = elec_audits_id - - class MockAuditHeader: - def __init__(self, dbkey, audityear): - self.DBKEY = dbkey - self.AUDITYEAR = audityear - - def setUp(self): - - InvalidRecord.reset() - - @patch("census_historical_migration.workbooklib.federal_awards.get_findings") - def test_track_invalid_number_of_audit_findings(self, mock_get_findings): - """Test tracking for invalid number of audit findings""" - # Mock the findings and audits data - mock_findings = [ - self.MockFinding("audit1"), - self.MockFinding("audit1"), - ] - - mock_audits = [ - self.MockAudit("audit1", "2"), - self.MockAudit("audit2", "1"), - ] - - mock_audit_header = self.MockAuditHeader("some_dbkey", "2024") - - mock_get_findings.return_value = mock_findings - - track_invalid_number_of_audit_findings(mock_audits, mock_audit_header) - mock_get_findings.assert_called_once_with("some_dbkey", "2024") - # Check if the invalid records were appended correctly - expected_invalid_records = [ - [ - { - "census_data": [ - {"column": "ELECAUDITSID", "value": "audit1"}, - {"column": "FINDINGSCOUNT", "value": "2"}, - ], - "gsa_fac_data": {"field": "findings_count", "value": "2"}, - }, - { - "census_data": [ - {"column": "ELECAUDITSID", "value": "audit2"}, - {"column": "FINDINGSCOUNT", "value": "1"}, - ], - "gsa_fac_data": {"field": "findings_count", "value": "0"}, - }, - ] - ] - - for expected_record in expected_invalid_records: - self.assertIn(expected_record, InvalidRecord.fields["federal_award"]) - - @patch("census_historical_migration.workbooklib.federal_awards.get_findings") - def test_no_tracking_with_valid_number_of_audit_findings(self, mock_get_findings): - """Test no tracking for valid number of audit findings""" - # Mock the findings and audits data - mock_findings = [ - self.MockFinding("audit1"), - self.MockFinding("audit1"), - ] - - mock_audits = [ - self.MockAudit("audit1", "2"), - ] - - mock_audit_header = self.MockAuditHeader("some_dbkey", "2024") - mock_get_findings.return_value = mock_findings - - track_invalid_number_of_audit_findings(mock_audits, mock_audit_header) - - mock_get_findings.assert_called_once_with("some_dbkey", "2024")
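Note on the helper these tracking tests depend on: the assertions above treat InvalidRecord (imported from .invalid_record) as a class-level registry that accumulates state across calls, which is why each test resets it first. A minimal sketch of that assumed shape, for orientation only; the real helper lives in census_historical_migration/invalid_record.py and may differ, and the append_validations_to_skip name below is hypothetical:

    class InvalidRecord:
        # Class-level state shared across one migration run.
        fields = {
            "federal_award": [],
            "validations_to_skip": [],
            "invalid_migration_tag": [],
        }

        @classmethod
        def reset(cls):
            # Clear all tracked records so one test (or migration)
            # cannot leak state into the next.
            for key in cls.fields:
                cls.fields[key] = []

        @classmethod
        def append_validations_to_skip(cls, validation):
            # Hypothetical helper: record a validation that downstream
            # cross-validation checks should bypass for this record.
            if validation not in cls.fields["validations_to_skip"]:
                cls.fields["validations_to_skip"].append(validation)

Without the reset() in setUp, records tracked by one test would satisfy (or break) the assertions of another.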
- self.assertEqual(len(InvalidRecord.fields["federal_award"]), 0) - - -class TestXformMissingPrefix(SimpleTestCase): - class MockAudit: - - def __init__(self, CFDA_PREFIX, CFDA): - self.CFDA_PREFIX = CFDA_PREFIX - self.CFDA = CFDA - - def test_for_no_missing_prefix(self): - """Test for no missing prefix""" - audits = [self.MockAudit("01", "01.123"), self.MockAudit("02", "02.456")] - - xform_replace_missing_prefix(audits) - - self.assertEqual(audits[0].CFDA_PREFIX, "01") - self.assertEqual(audits[1].CFDA_PREFIX, "02") - - def test_for_missing_prefix(self): - """Test for missing prefix""" - audits = [self.MockAudit("", "01.123"), self.MockAudit("02", "02.456")] - - xform_replace_missing_prefix(audits) - - self.assertEqual(audits[0].CFDA_PREFIX, "01") - self.assertEqual(audits[1].CFDA_PREFIX, "02") - - -class TestXformReplaceMissingFields(SimpleTestCase): - - class MockAudit: - - def __init__( - self, - LOANS, - DIRECT, - ): - self.LOANS = LOANS - self.DIRECT = DIRECT - - def test_replace_empty_fields(self): - audits = [ - self.MockAudit( - LOANS="", - DIRECT="", - ), - self.MockAudit( - LOANS="Present", - DIRECT="Present", - ), - ] - - xform_replace_required_values_with_gsa_migration_when_empty(audits) - - self.assertEqual(audits[0].LOANS, settings.GSA_MIGRATION) - self.assertEqual(audits[0].DIRECT, settings.GSA_MIGRATION) - - self.assertEqual(audits[1].LOANS, "Present") - self.assertEqual(audits[1].DIRECT, "Present") +from unittest.mock import patch +from django.conf import settings +from django.test import SimpleTestCase, TestCase + +from .invalid_migration_tags import INVALID_MIGRATION_TAGS +from .invalid_record import InvalidRecord +from .transforms.xform_string_to_string import ( + string_to_string, +) +from .exception_utils import DataMigrationError +from .workbooklib.federal_awards import ( + is_valid_prefix, + track_invalid_federal_program_total, + track_invalid_number_of_audit_findings, + xform_match_number_passthrough_names_ids, + xform_missing_amount_expended, + xform_missing_program_total, + xform_missing_cluster_total_v2, + xform_populate_default_award_identification_values, + xform_populate_default_loan_balance, + xform_constructs_cluster_names, + xform_populate_default_passthrough_amount, + xform_populate_default_passthrough_names_ids, + xform_replace_invalid_direct_award_flag, + xform_replace_invalid_extension, + xform_program_name, + xform_is_passthrough_award, + xform_missing_major_program, + is_valid_extension, + xform_cluster_names, + xform_replace_missing_prefix, + xform_replace_required_values_with_gsa_migration_when_empty, + xform_sanitize_additional_award_identification, +) + + +class TestXformConstructsClusterNames(SimpleTestCase): + class MockAudit: + def __init__(self, cluster_name, state_cluster_name, other_cluster_name): + self.CLUSTERNAME = cluster_name + self.STATECLUSTERNAME = state_cluster_name + self.OTHERCLUSTERNAME = other_cluster_name + + def test_empty_audits_list(self): + """Test the function with an empty audits list.""" + result = xform_constructs_cluster_names([]) + self.assertEqual(result, ([], [], [])) + + def test_empty_cluster_names(self): + """Test the function with empty cluster names.""" + audits = [self.MockAudit("", "", "")] + result = xform_constructs_cluster_names(audits) + self.assertEqual(result, ([settings.GSA_MIGRATION], [""], [""])) + + audits.append(self.MockAudit("", "Some State Cluster", "")) + result = xform_constructs_cluster_names(audits) + result = tuple(sorted(sublist) for sublist in result) + expected = ( + 
[settings.GSA_MIGRATION, settings.GSA_MIGRATION], + ["", ""], + ["", "Some State Cluster"], + ) + expected = tuple(sorted(sublist) for sublist in expected) + self.assertEqual(result, expected) + + audits.append(self.MockAudit("", "", "Some Other Cluster")) + result = xform_constructs_cluster_names(audits) + result = tuple(sorted(sublist) for sublist in result) + expected = ( + [settings.GSA_MIGRATION, settings.GSA_MIGRATION, settings.GSA_MIGRATION], + ["", "", "Some Other Cluster"], + ["", "Some State Cluster", ""], + ) + expected = tuple(sorted(sublist) for sublist in expected) + self.assertEqual( + result, + expected, + ) + + def test_valid_cluster_name_combinations(self): + """Test the function with valid cluster name combinations.""" + audits = [self.MockAudit("Normal Cluster", "", "")] + result = xform_constructs_cluster_names(audits) + self.assertEqual(result, (["Normal Cluster"], [""], [""])) + + audits.append(self.MockAudit("STATE CLUSTER", "Some State Cluster", "")) + result = xform_constructs_cluster_names(audits) + result = tuple(sorted(sublist) for sublist in result) + expected = ( + ["Normal Cluster", "STATE CLUSTER"], + ["", ""], + ["", "Some State Cluster"], + ) + expected = tuple(sorted(sublist) for sublist in expected) + self.assertEqual( + result, + expected, + ) + + audits.append( + self.MockAudit("OTHER CLUSTER NOT LISTED ABOVE", "", "Some Other Cluster") + ) + result = xform_constructs_cluster_names(audits) + result = tuple(sorted(sublist) for sublist in result) + expected = ( + ["Normal Cluster", "STATE CLUSTER", "OTHER CLUSTER NOT LISTED ABOVE"], + ["", "", "Some Other Cluster"], + ["", "Some State Cluster", ""], + ) + expected = tuple(sorted(sublist) for sublist in expected) + self.assertEqual( + result, + expected, + ) + + def test_data_migration_error(self): + """Test the function raises DataMigrationError when cluster name combination is invalid.""" + audits = [ + self.MockAudit("Cluster Name", "State Cluster Name", "Other Cluster Name") + ] + with self.assertRaises(DataMigrationError): + xform_constructs_cluster_names(audits) + + audits = [self.MockAudit("Cluster Name", "State Cluster Name", "")] + with self.assertRaises(DataMigrationError): + xform_constructs_cluster_names(audits) + + audits = [self.MockAudit("Cluster Name", "", "Other Cluster Name")] + with self.assertRaises(DataMigrationError): + xform_constructs_cluster_names(audits) + + +class TestXformPopulateDefaultLoanBalance(SimpleTestCase): + class MockAudit: + def __init__(self, LOANS, LOANBALANCE): + self.LOANS = LOANS + self.LOANBALANCE = LOANBALANCE + + def test_loan_with_no_balance(self): + """Test the function with a loan with no balance.""" + audits = [self.MockAudit(LOANS="Y", LOANBALANCE="")] + expected = [settings.GSA_MIGRATION] + self.assertEqual(xform_populate_default_loan_balance(audits), expected) + + def test_loan_with_balance(self): + """Test the function with a loan with a balance.""" + audits = [self.MockAudit(LOANS="Y", LOANBALANCE="100")] + expected = ["100"] + self.assertEqual(xform_populate_default_loan_balance(audits), expected) + + def test_no_loan(self): + """Test the function with no loan.""" + audits = [self.MockAudit(LOANS="N", LOANBALANCE="")] + expected = [""] + self.assertEqual(xform_populate_default_loan_balance(audits), expected) + + def test_no_loan_with_zero_balance(self): + """Test the function with no loan and zero balance.""" + audits = [self.MockAudit(LOANS="N", LOANBALANCE="0")] + expected = [""] + self.assertEqual(xform_populate_default_loan_balance(audits), 
expected) + + def test_unexpected_loan_balance(self): + """Test the function raises DataMigrationError when loan balance is unexpected.""" + audits = [self.MockAudit(LOANS="N", LOANBALANCE="100")] + with self.assertRaises(DataMigrationError): + xform_populate_default_loan_balance(audits) + + +class TestXformPopulateDefaultAwardIdentificationValues(SimpleTestCase): + class MockAudit: + def __init__(self, CFDA, AWARDIDENTIFICATION): + self.CFDA = CFDA + self.AWARDIDENTIFICATION = AWARDIDENTIFICATION + + def test_cfda_with_u_no_identification(self): + """Test the function with a CFDA with a U and no identification.""" + audits = [self.MockAudit(CFDA="123U", AWARDIDENTIFICATION="")] + expected = [settings.GSA_MIGRATION] + self.assertEqual( + xform_populate_default_award_identification_values(audits), expected + ) + + def test_cfda_with_rd_no_identification(self): + """Test the function with a CFDA with a RD and no identification.""" + audits = [self.MockAudit(CFDA="rd123", AWARDIDENTIFICATION="")] + expected = [settings.GSA_MIGRATION] + self.assertEqual( + xform_populate_default_award_identification_values(audits), expected + ) + + def test_cfda_without_u_or_rd(self): + """Test the function with a CFDA without a U or RD.""" + audits = [self.MockAudit(CFDA="12345", AWARDIDENTIFICATION="")] + expected = [""] + self.assertEqual( + xform_populate_default_award_identification_values(audits), expected + ) + + def test_cfda_with_identification(self): + """Test the function with a CFDA with an identification.""" + audits = [self.MockAudit(CFDA="U123", AWARDIDENTIFICATION="ABC")] + expected = ["ABC"] + self.assertEqual( + xform_populate_default_award_identification_values(audits), expected + ) + + +class TestXformPopulateDefaultPassthroughValues(SimpleTestCase): + class MockAudit: + def __init__(self, DIRECT, DBKEY, AUDITYEAR, ELECAUDITSID): + self.DIRECT = DIRECT + self.DBKEY = DBKEY + self.AUDITYEAR = AUDITYEAR + self.ELECAUDITSID = ELECAUDITSID + + class MockPassthrough: + def __init__(self, PASSTHROUGHNAME, PASSTHROUGHID): + self.PASSTHROUGHNAME = PASSTHROUGHNAME + self.PASSTHROUGHID = PASSTHROUGHID + + @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") + def test_direct_N_empty_name_and_id(self, mock_get_passthroughs): + """Test the function with a direct N audit with empty name and id.""" + mock_get_passthroughs.return_value = ([""], [""]) + audits = [ + self.MockAudit( + DIRECT="N", DBKEY="DBKEY2", AUDITYEAR="2022", ELECAUDITSID="1" + ) + ] + + names, ids = xform_populate_default_passthrough_names_ids(audits) + + self.assertEqual(names[0], settings.GSA_MIGRATION) + self.assertEqual(ids[0], settings.GSA_MIGRATION) + + @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") + def test_direct_N_non_empty_name_and_id(self, mock_get_passthroughs): + """Test the function with a direct N audit with non-empty name and id.""" + mock_get_passthroughs.return_value = (["Name1|Name2"], ["ID1|ID2"]) + audits = [ + self.MockAudit( + DIRECT="N", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="2" + ) + ] + + names, ids = xform_populate_default_passthrough_names_ids(audits) + + self.assertEqual(names[0], "Name1|Name2") + self.assertEqual(ids[0], "ID1|ID2") + + @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") + def test_direct_Y_empty_name_and_id(self, mock_get_passthroughs): + """Test the function with a direct Y audit with empty name and id.""" + mock_get_passthroughs.return_value = ([""], [""]) + audits = [ + 
self.MockAudit( + DIRECT="Y", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="3" + ) + ] + + names, ids = xform_populate_default_passthrough_names_ids(audits) + + self.assertEqual(names[0], "") + self.assertEqual(ids[0], "") + + @patch("census_historical_migration.workbooklib.federal_awards._get_passthroughs") + def test_direct_Y_non_empty_name_and_id(self, mock_get_passthroughs): + """Test the function with a direct Y audit with non-empty name and id.""" + mock_get_passthroughs.return_value = (["Name"], ["ID"]) + audits = [ + self.MockAudit( + DIRECT="Y", DBKEY="DBKEY1", AUDITYEAR="2022", ELECAUDITSID="4" + ) + ] + + names, ids = xform_populate_default_passthrough_names_ids(audits) + + self.assertEqual(names[0], "Name") + self.assertEqual(ids[0], "ID") + + +class TestXformPopulateDefaultPassthroughAmount(SimpleTestCase): + class MockAudit: + def __init__(self, PASSTHROUGHAWARD, PASSTHROUGHAMOUNT): + self.PASSTHROUGHAWARD = PASSTHROUGHAWARD + self.PASSTHROUGHAMOUNT = PASSTHROUGHAMOUNT + + def test_passthrough_award_Y_non_empty_amount(self): + """Test the function with a passthrough award Y audit with non-empty amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="Y", PASSTHROUGHAMOUNT="100")] + expected = ["100"] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_N_empty_amount(self): + """Test the function with a passthrough award N audit with empty amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="")] + expected = [""] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_N_zero_amount(self): + """Test the function with a passthrough award N audit with zero amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="0")] + expected = [""] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_N_unexpected_amount(self): + """Test the function raises DataMigrationError when passthrough award N and amount is unexpected.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="N", PASSTHROUGHAMOUNT="100")] + with self.assertRaises(DataMigrationError): + xform_populate_default_passthrough_amount(audits) + + def test_passthrough_award_Y_empty_amount(self): + """Test for default value when passthrough award Y audit with empty amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="Y", PASSTHROUGHAMOUNT="")] + expected = [str(settings.GSA_MIGRATION_INT)] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_gsa_non_empty_amount(self): + """Test the function with a passthrough award GSA_MIGRATION audit with non-empty amount.""" + audits = [ + self.MockAudit( + PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="100" + ) + ] + expected = ["100"] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_gsa_empty_amount(self): + """Test the function with a passthrough award GSA_MIGRATION audit with empty amount.""" + audits = [ + self.MockAudit( + PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="" + ) + ] + expected = [""] + self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + def test_passthrough_award_gsa_zero_amount(self): + """Test the function with a passthrough award GSA_MIGRATION audit with zero amount.""" + audits = [ + self.MockAudit( + PASSTHROUGHAWARD=settings.GSA_MIGRATION, PASSTHROUGHAMOUNT="0" + ) + ] + expected = [""] 
+ self.assertEqual(xform_populate_default_passthrough_amount(audits), expected) + + +class TestXformIsPassthroughAward(SimpleTestCase): + class MockAudit: + def __init__(self, PASSTHROUGHAWARD, PASSTHROUGHAMOUNT): + self.PASSTHROUGHAWARD = PASSTHROUGHAWARD + self.PASSTHROUGHAMOUNT = PASSTHROUGHAMOUNT + + def test_passthrough_award_Y(self): + """Test the function with a valid passthrough award.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="Y", PASSTHROUGHAMOUNT="0")] + expected = "Y" + xform_is_passthrough_award(audits) + self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) + + def test_passthrough_award_empty_with_amount(self): + """Test the function with an empty passthrough award and truthy amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="42")] + expected = settings.GSA_MIGRATION + xform_is_passthrough_award(audits) + self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) + + def test_passthrough_award_empty_with_no_amount(self): + """Test the function with an empty passthrough award and 0 amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="0")] + expected = settings.GSA_MIGRATION + xform_is_passthrough_award(audits) + self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) + + def test_passthrough_award_empty_with_empty_amount(self): + """Test the function with an empty passthrough award and empty amount.""" + audits = [self.MockAudit(PASSTHROUGHAWARD="", PASSTHROUGHAMOUNT="")] + expected = settings.GSA_MIGRATION + xform_is_passthrough_award(audits) + self.assertEqual(audits[0].PASSTHROUGHAWARD, expected) + + +class TestCFDAFunctions(SimpleTestCase): + def test_is_valid_prefix(self): + """Test the function with valid and invalid prefixes.""" + self.assertTrue(is_valid_prefix("01")) + self.assertFalse(is_valid_prefix("ABC")) + self.assertFalse(is_valid_prefix("123")) + + def test_is_valid_extension(self): + """Test the function with valid and invalid extensions.""" + self.assertTrue(is_valid_extension("RD")) + self.assertTrue(is_valid_extension("RD1")) + self.assertTrue(is_valid_extension("123A")) + self.assertTrue(is_valid_extension("U01")) + # Invalid cases + self.assertFalse(is_valid_extension("RDABC")) + self.assertFalse(is_valid_extension("12345")) + self.assertFalse(is_valid_extension("UA123")) + + def test_xform_replace_invalid_extension(self): + class MockAudit: + def __init__(self, prefix, ext): + self.CFDA_PREFIX = prefix + self.CFDA_EXT = ext + + # Valid prefix and extension + audit = MockAudit("01", "RD1") + result, _ = xform_replace_invalid_extension(audit) + self.assertEqual(result, "01.RD1") + + # Invalid prefix + audit = MockAudit("ABC", "RD1") + with self.assertRaises(DataMigrationError): + xform_replace_invalid_extension(audit) + + # Invalid extension + audit = MockAudit("01", "ABC") + result, _ = xform_replace_invalid_extension(audit) + self.assertEqual(result, f"01.{settings.GSA_MIGRATION}") + + +class TestXformMissingProgramTotal(SimpleTestCase): + class AuditMock: + def __init__(self, cfda_prefix, cfda_ext, amount, program_total=None): + self.CFDA_PREFIX = cfda_prefix + self.CFDA_EXT = cfda_ext + self.AMOUNT = amount + self.PROGRAMTOTAL = program_total + + def test_with_missing_program_total(self): + """Test for missing program total""" + audits = [ + self.AuditMock("10", "123", "100"), + self.AuditMock("10", "123", "200"), + self.AuditMock("20", "456", "300", "300"), + ] + expected_program_totals = {"10.123": "300", "20.456": "300"} + + xform_missing_program_total(audits) + + for audit in audits: + cfda_key 
= f"{string_to_string(audit.CFDA_PREFIX)}.{string_to_string(audit.CFDA_EXT)}" + self.assertEqual(audit.PROGRAMTOTAL, expected_program_totals[cfda_key]) + + def test_preserve_existing_program_total(self): + """Test for non missing program total""" + audits = [ + self.AuditMock("10", "123", "100", "150"), + self.AuditMock("10", "123", "200", "350"), + ] + # In this case, we expect the original PROGRAMTOTAL values to be preserved + expected_program_totals = ["150", "350"] + + xform_missing_program_total(audits) + + for audit, expected in zip(audits, expected_program_totals): + self.assertEqual(audit.PROGRAMTOTAL, expected) + + +class TestXformMissingClusterTotal(SimpleTestCase): + class AuditMock: + def __init__( + self, + amount, + cluster_name, + state_cluster_name="", + other_cluster_name="", + cluster_total="", + ): + self.AMOUNT = amount + self.CLUSTERTOTAL = cluster_total + self.CLUSTERNAME = cluster_name + self.STATECLUSTERNAME = state_cluster_name + self.OTHERCLUSTERNAME = other_cluster_name + + def test_xform_missing_cluster_total(self): + """Test for missing cluster total""" + audits = [ + self.AuditMock("100", "Cluster A"), + self.AuditMock("150", "Cluster B"), + self.AuditMock("150", "Cluster A", cluster_total="250"), + ] + cluster_names = ["Cluster A", "Cluster B", "Cluster A"] + other_cluster_names = ["", "", ""] + state_cluster_names = ["", "", ""] + expected_totals = ["250", "150", "250"] + + xform_missing_cluster_total_v2( + audits, cluster_names, other_cluster_names, state_cluster_names + ) + + for audit, expected in zip(audits, expected_totals): + self.assertEqual(str(audit.CLUSTERTOTAL), expected) + + def test_xform_missing_cluster_total_with_state_cluster(self): + """Test for missing state cluster total""" + audits = [ + self.AuditMock("100", "Cluster A"), + self.AuditMock( + "150", + settings.STATE_CLUSTER, + state_cluster_name="State Cluster A", + cluster_total="300", + ), + self.AuditMock("150", "Cluster A", cluster_total="250"), + self.AuditMock( + "150", settings.STATE_CLUSTER, state_cluster_name="State Cluster A" + ), + ] + cluster_names = [ + "Cluster A", + settings.STATE_CLUSTER, + "Cluster A", + settings.STATE_CLUSTER, + ] + other_cluster_names = ["", "", "", ""] + state_cluster_names = ["", "State Cluster A", "", "State Cluster A"] + expected_totals = ["250", "300", "250", "300"] + + xform_missing_cluster_total_v2( + audits, cluster_names, other_cluster_names, state_cluster_names + ) + + for audit, expected in zip(audits, expected_totals): + self.assertEqual(str(audit.CLUSTERTOTAL), expected) + + def test_xform_missing_cluster_total_with_other_cluster(self): + """Test for missing other cluster total""" + audits = [ + self.AuditMock("100", "Cluster A"), + self.AuditMock( + "150", + settings.OTHER_CLUSTER, + other_cluster_name="Other Cluster A", + cluster_total="300", + ), + self.AuditMock("150", "Cluster A", cluster_total="250"), + self.AuditMock( + "150", settings.OTHER_CLUSTER, other_cluster_name="Other Cluster A" + ), + ] + cluster_names = [ + "Cluster A", + settings.OTHER_CLUSTER, + "Cluster A", + settings.OTHER_CLUSTER, + ] + state_cluster_names = ["", "", "", ""] + other_cluster_names = ["", "Other Cluster A", "", "Other Cluster A"] + expected_totals = ["250", "300", "250", "300"] + + xform_missing_cluster_total_v2( + audits, cluster_names, other_cluster_names, state_cluster_names + ) + + for audit, expected in zip(audits, expected_totals): + self.assertEqual(str(audit.CLUSTERTOTAL), expected) + + def test_xform_missing_cluster_total_invalid(self): + 
"""Test for incorrect cluster total""" + audits = [ + self.AuditMock("150", "Cluster A", cluster_total="33"), + ] + cluster_names = ["Cluster A"] + other_cluster_names = [""] + state_cluster_names = [""] + expected_totals = ["33"] + + InvalidRecord.reset() + xform_missing_cluster_total_v2( + audits, cluster_names, other_cluster_names, state_cluster_names + ) + + for audit, expected in zip(audits, expected_totals): + self.assertEqual(str(audit.CLUSTERTOTAL), expected) + + self.assertEqual( + InvalidRecord.fields["validations_to_skip"], + ["cluster_total_is_correct"], + ) + self.assertEqual( + InvalidRecord.fields["federal_award"], + [ + [ + { + "census_data": [ + {"column": "CLUSTERTOTAL", "value": 33}, + {"column": "CLUSTERNAME", "value": "Cluster A"}, + {"column": "STATECLUSTERNAME", "value": ""}, + {"column": "OTHERCLUSTERNAME", "value": ""}, + ], + "gsa_fac_data": {"field": "cluster_total", "value": 150}, + } + ] + ], + ) + + +class TestXformMissingAmountExpended(SimpleTestCase): + class AuditMock: + def __init__( + self, + amount, + ): + self.AMOUNT = amount + + def setUp(self): + self.audits = [ + self.AuditMock("100"), # Normal case + self.AuditMock(""), # Empty string should default to '0' + self.AuditMock(None), # None should default to '0' + ] + + def test_xform_missing_amount_expended(self): + """Test for missing amount expended""" + + xform_missing_amount_expended(self.audits) + + expected_results = ["100", "0", "0"] + actual_results = [audit.AMOUNT for audit in self.audits] + + self.assertEqual(actual_results, expected_results) + + +class TestXformMatchNumberPassthroughNamesIds(SimpleTestCase): + def test_match_numbers_all_empty(self): + """Test the function with all empty names and ids.""" + names = ["", "", ""] + ids = ["", "", ""] + expected_ids = ["", "", ""] + + transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( + names, ids + ) + + self.assertEqual(transformed_names, names) + self.assertEqual(transformed_ids, expected_ids) + + def test_match_numbers_non_empty_names_empty_ids(self): + """Test the function with non-empty names and empty ids.""" + names = ["name1|name2", "name3|name4"] + ids = ["", ""] + expected_ids = [ + f"{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", + f"{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", + ] + + transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( + names, ids + ) + + self.assertEqual(transformed_names, names) + self.assertEqual(transformed_ids, expected_ids) + + def test_match_numbers_mixed_empty_and_non_empty(self): + """Test the function with mixed empty and non-empty names and ids.""" + names = ["name1|name2|name3", "name4", "", "name5", ""] + ids = ["id1", "", "id2", "id3|id4", ""] + expected_ids = [ + f"id1|{settings.GSA_MIGRATION}|{settings.GSA_MIGRATION}", + f"{settings.GSA_MIGRATION}", + "id2", + "id3|id4", + "", + ] + + expected_names = [ + "name1|name2|name3", + "name4", + f"{settings.GSA_MIGRATION}", + f"name5|{settings.GSA_MIGRATION}", + "", + ] + + transformed_names, transformed_ids = xform_match_number_passthrough_names_ids( + names, ids + ) + + self.assertEqual(transformed_names, expected_names) + self.assertEqual(transformed_ids, expected_ids) + + +class TestXformMissingMajorProgram(SimpleTestCase): + class AuditMock: + def __init__( + self, + major_program, + audit_type, + ): + self.MAJORPROGRAM = major_program + self.TYPEREPORT_MP = audit_type + + def test_xform_normal_major_program(self): + """Test for normal major program""" + audits = [self.AuditMock("Y", 
"U")] + + xform_missing_major_program(audits) + + self.assertEqual(audits[0].MAJORPROGRAM, "Y") + + def test_xform_missing_major_program_with_audit_type(self): + """Test for missing major program with audit type provided""" + audits = [self.AuditMock("", "U")] + + xform_missing_major_program(audits) + + self.assertEqual(audits[0].MAJORPROGRAM, "Y") + + def test_xform_missing_major_program_without_audit_type(self): + """Test for missing major program without audit type provided""" + audits = [self.AuditMock("", "")] + + xform_missing_major_program(audits) + + self.assertEqual(audits[0].MAJORPROGRAM, "N") + + +class TestXformMissingProgramName(SimpleTestCase): + class AuditMock: + def __init__(self, program_name): + self.FEDERALPROGRAMNAME = program_name + + def test_with_normal_program_name(self): + """Test for missing program name""" + audits = [self.AuditMock("Some fake name")] + + xform_program_name(audits) + + self.assertEqual(audits[0].FEDERALPROGRAMNAME, "Some fake name") + + def test_with_missing_program_name(self): + """Test for missing program name""" + audits = [self.AuditMock("")] + + xform_program_name(audits) + + self.assertEqual(audits[0].FEDERALPROGRAMNAME, settings.GSA_MIGRATION) + + +class TestXformClusterNames(SimpleTestCase): + class MockAudit: + def __init__(self, cluster_name): + self.CLUSTERNAME = cluster_name + + def test_cluster_name_not_other_cluster(self): + audits = [] + audits.append(self.MockAudit("STUDENT FINANCIAL ASSISTANCE")) + audits.append(self.MockAudit("RESEARCH AND DEVELOPMENT")) + result = xform_cluster_names(audits) + for index in range(len(result)): + self.assertEqual(result[index].CLUSTERNAME, audits[index].CLUSTERNAME) + + def test_cluster_name_other_cluster(self): + audits = [] + audits.append(self.MockAudit("OTHER CLUSTER")) + audits.append(self.MockAudit("OTHER CLUSTER")) + result = xform_cluster_names(audits) + for audit in result: + self.assertEqual(audit.CLUSTERNAME, settings.OTHER_CLUSTER) + + +class TestXformReplaceInvalidDirectAwardFlag(SimpleTestCase): + class MockAudit: + def __init__(self, direct_flag): + self.DIRECT = direct_flag + + def test_replace_invalid_direct_award_flag(self): + audits = [self.MockAudit("Y"), self.MockAudit("N"), self.MockAudit("Y")] + passthrough_names = [ + "some name", + "some other name", + "", + ] + + results = xform_replace_invalid_direct_award_flag(audits, passthrough_names) + # Expect first audit DIRECT flag to be replaced with default + self.assertEqual(results[0], settings.GSA_MIGRATION) + # Expect second audit DIRECT flag to remain unchanged + self.assertEqual(results[1], "N") + # Expect third audit DIRECT flag to remain unchanged + self.assertEqual(results[2], "Y") + + +class TestXformSanitizeAdditionalAwardIdentification(SimpleTestCase): + class MockAudit: + def __init__(self, value): + self.AWARDIDENTIFICATION = value + + def test_sanitize_valid(self): + audits = [self.MockAudit('=""12345"'), self.MockAudit("text")] + identifications = ['=""12345"', "text"] + expected = ["12345", "text"] + result = xform_sanitize_additional_award_identification(audits, identifications) + self.assertEqual(result, expected) + + def test_empty_and_none_identifications(self): + audits = [self.MockAudit(""), self.MockAudit(None), self.MockAudit('=""12345"')] + identifications = ["", None, '=""12345"'] + expected = ["", None, "12345"] + result = xform_sanitize_additional_award_identification(audits, identifications) + self.assertEqual(result, expected) + + +class TestTrackInvalidFederalProgramTotal(SimpleTestCase): + 
class MockAudit: + def __init__(self, PROGRAMTOTAL, AMOUNT): + self.PROGRAMTOTAL = PROGRAMTOTAL + self.AMOUNT = AMOUNT + + def setUp(self): + self.audits = [ + self.MockAudit(PROGRAMTOTAL="100", AMOUNT="50"), + self.MockAudit(PROGRAMTOTAL="200", AMOUNT="200"), + self.MockAudit(PROGRAMTOTAL="150", AMOUNT="100"), + ] + self.cfda_key_values = ["1234", "5678", "1234"] + + # Reset InvalidRecord before each test + InvalidRecord.reset() + + def test_track_invalid_federal_program_total(self): + """Test for invalid federal program total""" + + track_invalid_federal_program_total(self.audits, self.cfda_key_values) + + # Check the results in InvalidRecord + self.assertEqual(len(InvalidRecord.fields["federal_award"]), 1) + self.assertIn( + "federal_program_total_is_correct", + InvalidRecord.fields["validations_to_skip"], + ) + self.assertIn( + INVALID_MIGRATION_TAGS.INVALID_FEDERAL_PROGRAM_TOTAL, + InvalidRecord.fields["invalid_migration_tag"], + ) + + def test_no_invalid_federal_program_total(self): + # Adjust mock data for no invalid records + self.audits = [ + self.MockAudit(PROGRAMTOTAL="150", AMOUNT="50"), + self.MockAudit(PROGRAMTOTAL="200", AMOUNT="200"), + self.MockAudit(PROGRAMTOTAL="150", AMOUNT="100"), + ] + + track_invalid_federal_program_total(self.audits, self.cfda_key_values) + + # Check that no invalid records are added + self.assertEqual(len(InvalidRecord.fields["federal_award"]), 0) + self.assertNotIn( + "federal_program_total_is_correct", + InvalidRecord.fields["validations_to_skip"], + ) + self.assertNotIn( + INVALID_MIGRATION_TAGS.INVALID_FEDERAL_PROGRAM_TOTAL, + InvalidRecord.fields["invalid_migration_tag"], + ) + + +class TestTrackInvalidNumberOfAuditFindings(TestCase): + + class MockAudit: + def __init__(self, elec_audits_id, findings_count): + self.ELECAUDITSID = elec_audits_id + self.FINDINGSCOUNT = findings_count + + class MockFinding: + def __init__(self, elec_audits_id): + self.ELECAUDITSID = elec_audits_id + + class MockAuditHeader: + def __init__(self, dbkey, audityear): + self.DBKEY = dbkey + self.AUDITYEAR = audityear + + def setUp(self): + + InvalidRecord.reset() + + @patch("census_historical_migration.workbooklib.federal_awards.get_findings") + def test_track_invalid_number_of_audit_findings(self, mock_get_findings): + """Test tracking for invalid number of audit findings""" + # Mock the findings and audits data + mock_findings = [ + self.MockFinding("audit1"), + self.MockFinding("audit1"), + ] + + mock_audits = [ + self.MockAudit("audit1", "2"), + self.MockAudit("audit2", "1"), + ] + + mock_audit_header = self.MockAuditHeader("some_dbkey", "2024") + + mock_get_findings.return_value = mock_findings + + track_invalid_number_of_audit_findings(mock_audits, mock_audit_header) + mock_get_findings.assert_called_once_with("some_dbkey", "2024") + # Check if the invalid records were appended correctly + expected_invalid_records = [ + [ + { + "census_data": [ + {"column": "ELECAUDITSID", "value": "audit1"}, + {"column": "FINDINGSCOUNT", "value": "2"}, + ], + "gsa_fac_data": {"field": "findings_count", "value": "2"}, + }, + { + "census_data": [ + {"column": "ELECAUDITSID", "value": "audit2"}, + {"column": "FINDINGSCOUNT", "value": "1"}, + ], + "gsa_fac_data": {"field": "findings_count", "value": "0"}, + }, + ] + ] + + for expected_record in expected_invalid_records: + self.assertIn(expected_record, InvalidRecord.fields["federal_award"]) + + @patch("census_historical_migration.workbooklib.federal_awards.get_findings") + def test_no_tracking_with_valid_number_of_audit_findings(self, mock_get_findings): + """Test no tracking for valid number of audit findings""" + # Mock the findings and
audits data + mock_findings = [ + self.MockFinding("audit1"), + self.MockFinding("audit1"), + ] + + mock_audits = [ + self.MockAudit("audit1", "2"), + ] + + mock_audit_header = self.MockAuditHeader("some_dbkey", "2024") + mock_get_findings.return_value = mock_findings + + track_invalid_number_of_audit_findings(mock_audits, mock_audit_header) + + mock_get_findings.assert_called_once_with("some_dbkey", "2024") + self.assertEqual(len(InvalidRecord.fields["federal_award"]), 0) + + +class TestXformMissingPrefix(SimpleTestCase): + class MockAudit: + + def __init__(self, CFDA_PREFIX, CFDA): + self.CFDA_PREFIX = CFDA_PREFIX + self.CFDA = CFDA + + def test_for_no_missing_prefix(self): + """Test for no missing prefix""" + audits = [self.MockAudit("01", "01.123"), self.MockAudit("02", "02.456")] + + xform_replace_missing_prefix(audits) + + self.assertEqual(audits[0].CFDA_PREFIX, "01") + self.assertEqual(audits[1].CFDA_PREFIX, "02") + + def test_for_missing_prefix(self): + """Test for missing prefix""" + audits = [self.MockAudit("", "01.123"), self.MockAudit("02", "02.456")] + + xform_replace_missing_prefix(audits) + + self.assertEqual(audits[0].CFDA_PREFIX, "01") + self.assertEqual(audits[1].CFDA_PREFIX, "02") + + +class TestXformReplaceMissingFields(SimpleTestCase): + + class MockAudit: + + def __init__( + self, + LOANS, + DIRECT, + ): + self.LOANS = LOANS + self.DIRECT = DIRECT + + def test_replace_empty_fields(self): + audits = [ + self.MockAudit( + LOANS="", + DIRECT="", + ), + self.MockAudit( + LOANS="Present", + DIRECT="Present", + ), + ] + + xform_replace_required_values_with_gsa_migration_when_empty(audits) + + self.assertEqual(audits[0].LOANS, settings.GSA_MIGRATION) + self.assertEqual(audits[0].DIRECT, settings.GSA_MIGRATION) + + self.assertEqual(audits[1].LOANS, "Present") + self.assertEqual(audits[1].DIRECT, "Present") diff --git a/backend/census_historical_migration/test_findings_xforms.py b/backend/census_historical_migration/test_findings_xforms.py index 1c03050c91..f8d3766cf7 100644 --- a/backend/census_historical_migration/test_findings_xforms.py +++ b/backend/census_historical_migration/test_findings_xforms.py @@ -1,356 +1,356 @@ -from django.conf import settings -from django.test import SimpleTestCase - -from census_historical_migration.invalid_migration_tags import INVALID_MIGRATION_TAGS -from census_historical_migration.invalid_record import InvalidRecord - -from .workbooklib.findings_text import ( - xform_add_placeholder_for_missing_text_of_finding, - xform_add_placeholder_for_missing_references, -) - -from .workbooklib.findings import ( - has_duplicate_ref_numbers, - track_invalid_records_with_repeated_ref_numbers, - xform_empty_repeat_prior_reference, - xform_replace_required_fields_with_gsa_migration_when_empty, - xform_sort_compliance_requirement, - xform_missing_compliance_requirement, -) - - -class TestXformSortComplianceRequirement(SimpleTestCase): - class Finding: - def __init__(self, typerequirement): - self.TYPEREQUIREMENT = typerequirement - - def test_sort_and_uppercase(self): - """Test that strings are sorted and uppercased.""" - findings = [self.Finding("abc"), self.Finding(" fca"), self.Finding("Bac")] - expected_results = ["ABC", "ACF", "ABC"] - - xform_sort_compliance_requirement(findings) - - for finding, expected in zip(findings, expected_results): - self.assertEqual(finding.TYPEREQUIREMENT, expected) - - def test_empty_string(self): - """Test that an empty string is not changed.""" - findings = [self.Finding("")] - xform_sort_compliance_requirement(findings) 
- self.assertEqual(findings[0].TYPEREQUIREMENT, "") - - def test_no_change_needed(self): - """Test that a string that is already sorted and uppercased is not changed.""" - findings = [self.Finding("ABC")] - xform_sort_compliance_requirement(findings) - self.assertEqual(findings[0].TYPEREQUIREMENT, "ABC") - - def test_with_spaces_and_case(self): - """Test that strings with spaces and mixed case are sorted and uppercased.""" - findings = [self.Finding(" aBc "), self.Finding("CBA "), self.Finding("bAc ")] - expected_results = ["ABC", "ABC", "ABC"] - - xform_sort_compliance_requirement(findings) - - for finding, expected in zip(findings, expected_results): - self.assertEqual(finding.TYPEREQUIREMENT, expected) - - -class TestXformAddPlaceholderForMissingFindingsText(SimpleTestCase): - class Findings: - def __init__(self, refnum): - self.FINDINGREFNUMS = refnum - - class FindingsText: - def __init__(self, refnum, seqnum=None, text=None, chartstables=None): - self.FINDINGREFNUMS = refnum - self.SEQ_NUMBER = seqnum - self.TEXT = text - self.CHARTSTABLES = chartstables - - def test_placeholder_addition_for_missing_references(self): - # Setup specific test data - mock_findings = [ - self.Findings("ref1"), - self.Findings("ref2"), - ] - mock_findings_texts = [ - self.FindingsText("ref1", "1", "text1", "chart1"), - ] - - findings_texts = xform_add_placeholder_for_missing_references( - mock_findings, mock_findings_texts - ) - - self.assertEqual( - len(findings_texts), 2 - ) # Expecting two items in findings_texts - self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") - self.assertEqual(findings_texts[1].TEXT, settings.GSA_MIGRATION) - self.assertEqual(findings_texts[1].CHARTSTABLES, settings.GSA_MIGRATION) - - def test_no_placeholder_addition_when_all_references_present(self): - # Setup specific test data - mock_findings = [ - self.Findings("ref1"), - self.Findings("ref2"), - ] - mock_findings_texts = [ - self.FindingsText("ref1", "1", "text1", "chart1"), - self.FindingsText("ref2", "2", "text2", "chart2"), - ] - - findings_texts = xform_add_placeholder_for_missing_references( - mock_findings, mock_findings_texts - ) - - self.assertEqual( - len(findings_texts), 2 - ) # Expecting two items in findings_texts - self.assertEqual(findings_texts[0].FINDINGREFNUMS, "ref1") - self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") - - -class TestXformAddPlaceholderForMissingTextOfFinding(SimpleTestCase): - - class FindingsText: - def __init__(self, FINDINGREFNUMS=None, TEXT=None): - self.FINDINGREFNUMS = FINDINGREFNUMS - self.TEXT = TEXT - - def test_add_placeholder_to_empty_text(self): - findings_texts = [self.FindingsText(FINDINGREFNUMS="123", TEXT="")] - expected_text = settings.GSA_MIGRATION - xform_add_placeholder_for_missing_text_of_finding(findings_texts) - self.assertEqual( - findings_texts[0].TEXT, - expected_text, - "The TEXT field should have the placeholder text.", - ) - - def test_no_placeholder_if_text_present(self): - findings_texts = [self.FindingsText(FINDINGREFNUMS="123", TEXT="Existing text")] - expected_text = "Existing text" - xform_add_placeholder_for_missing_text_of_finding(findings_texts) - self.assertEqual( - findings_texts[0].TEXT, - expected_text, - "The TEXT field should not be modified.", - ) - - def test_empty_finding_refnums_no_change(self): - findings_texts = [self.FindingsText(FINDINGREFNUMS="", TEXT="")] - xform_add_placeholder_for_missing_text_of_finding(findings_texts) - self.assertEqual( - findings_texts[0].TEXT, - "", - "The TEXT field should remain empty if 
FINDINGREFNUMS is empty.", - ) - - -class TestXformMissingComplianceRequirement(SimpleTestCase): - class Findings: - def __init__(self, type_requirement): - self.TYPEREQUIREMENT = type_requirement - - def test_missing_compliance_requirement(self): - mock_findings = [self.Findings("")] - - xform_missing_compliance_requirement(mock_findings) - - self.assertEqual(mock_findings[0].TYPEREQUIREMENT, settings.GSA_MIGRATION) - - def test_normal_compliance_requirement(self): - mock_findings = [self.Findings("ABC")] - - xform_missing_compliance_requirement(mock_findings) - - self.assertEqual(mock_findings[0].TYPEREQUIREMENT, "ABC") - - -class TestHasDuplicateRefNumbers(SimpleTestCase): - - class Finding: - def __init__(self, FINDINGREFNUMS, ELECAUDITSID): - self.FINDINGREFNUMS = FINDINGREFNUMS - self.ELECAUDITSID = ELECAUDITSID - - def test_no_duplicates(self): - """Test that no duplicates are found when there are no duplicate reference numbers.""" - findings = [self.Finding("ref1", "award1"), self.Finding("ref2", "award2")] - award_refs = [finding.ELECAUDITSID for finding in findings] - result = has_duplicate_ref_numbers(award_refs, findings) - self.assertFalse(result) - - def test_with_duplicates(self): - """Test that duplicates are found when there are duplicate reference numbers.""" - findings = [self.Finding("ref1", "award1"), self.Finding("ref1", "award1")] - award_refs = [finding.ELECAUDITSID for finding in findings] - result = has_duplicate_ref_numbers(award_refs, findings) - self.assertTrue(result) - - def test_different_awards(self): - """Test that duplicates are not found when the reference numbers are the same but the awards are different.""" - findings = [self.Finding("ref1", "award1"), self.Finding("ref1", "award2")] - award_refs = [finding.ELECAUDITSID for finding in findings] - result = has_duplicate_ref_numbers(award_refs, findings) - self.assertFalse(result) - - -class TestTrackInvalidRecordsWithRepeatedRefNumbers(SimpleTestCase): - - class Finding: - def __init__(self, FINDINGREFNUMS, ELECAUDITSID): - self.FINDINGREFNUMS = FINDINGREFNUMS - self.ELECAUDITSID = ELECAUDITSID - - def test_track_invalid_records(self): - - InvalidRecord.reset() - findings = [self.Finding("ref1", "id1"), self.Finding("ref1", "id1")] - award_refs = [finding.ELECAUDITSID for finding in findings] - track_invalid_records_with_repeated_ref_numbers(award_refs, findings) - - self.assertEqual( - InvalidRecord.fields["finding"], - [ - [ - { - "census_data": [ - {"column": "FINDINGREFNUMS", "value": "ref1"}, - {"column": "ELECAUDITSID", "value": "id1"}, - ], - "gsa_fac_data": {"field": "reference_number", "value": "ref1"}, - }, - { - "census_data": [ - {"column": "FINDINGREFNUMS", "value": "ref1"}, - {"column": "ELECAUDITSID", "value": "id1"}, - ], - "gsa_fac_data": {"field": "reference_number", "value": "ref1"}, - }, - ] - ], - ) - - self.assertEqual( - InvalidRecord.fields["validations_to_skip"], - ["check_finding_reference_uniqueness"], - ) - self.assertEqual( - InvalidRecord.fields["invalid_migration_tag"], - [INVALID_MIGRATION_TAGS.DUPLICATE_FINDING_REFERENCE_NUMBERS], - ) - - def test_no_invalid_records(self): - """Test that no invalid records are tracked when there are no duplicate reference numbers.""" - findings = [self.Finding("ref1", "id1"), self.Finding("ref2", "id2")] - award_refs = [finding.ELECAUDITSID for finding in findings] - InvalidRecord.reset() - - track_invalid_records_with_repeated_ref_numbers(award_refs, findings) - - self.assertEqual(InvalidRecord.fields["finding"], []) - 
self.assertEqual(InvalidRecord.fields["validations_to_skip"], []) - - -class TestXformReplaceRequiredFields(SimpleTestCase): - class Finding: - - def __init__( - self, - MODIFIEDOPINION, - OTHERNONCOMPLIANCE, - MATERIALWEAKNESS, - SIGNIFICANTDEFICIENCY, - OTHERFINDINGS, - QCOSTS, - FINDINGREFNUMS, - ): - self.MODIFIEDOPINION = MODIFIEDOPINION - self.OTHERNONCOMPLIANCE = OTHERNONCOMPLIANCE - self.MATERIALWEAKNESS = MATERIALWEAKNESS - self.SIGNIFICANTDEFICIENCY = SIGNIFICANTDEFICIENCY - self.OTHERFINDINGS = OTHERFINDINGS - self.QCOSTS = QCOSTS - self.FINDINGREFNUMS = FINDINGREFNUMS - - def test_replace_empty_fields(self): - findings = [ - self.Finding( - MODIFIEDOPINION="", - OTHERNONCOMPLIANCE="", - MATERIALWEAKNESS="Present", - SIGNIFICANTDEFICIENCY="", - OTHERFINDINGS="Present", - QCOSTS="", - FINDINGREFNUMS="", - ), - self.Finding( - MODIFIEDOPINION="Present", - OTHERNONCOMPLIANCE="Present", - MATERIALWEAKNESS="", - SIGNIFICANTDEFICIENCY="", - OTHERFINDINGS="Present", - QCOSTS="", - FINDINGREFNUMS="Present", - ), - self.Finding( - MODIFIEDOPINION="", - OTHERNONCOMPLIANCE="Present", - MATERIALWEAKNESS="Present", - SIGNIFICANTDEFICIENCY="", - OTHERFINDINGS="", - QCOSTS="Present", - FINDINGREFNUMS="", - ), - ] - - xform_replace_required_fields_with_gsa_migration_when_empty(findings) - - self.assertEqual(findings[0].MODIFIEDOPINION, settings.GSA_MIGRATION) - self.assertEqual(findings[0].OTHERNONCOMPLIANCE, settings.GSA_MIGRATION) - self.assertEqual(findings[0].MATERIALWEAKNESS, "Present") - self.assertEqual(findings[0].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) - self.assertEqual(findings[0].OTHERFINDINGS, "Present") - self.assertEqual(findings[0].QCOSTS, settings.GSA_MIGRATION) - self.assertEqual(findings[0].FINDINGREFNUMS, settings.GSA_MIGRATION) - - self.assertEqual(findings[1].MODIFIEDOPINION, "Present") - self.assertEqual(findings[1].OTHERNONCOMPLIANCE, "Present") - self.assertEqual(findings[1].MATERIALWEAKNESS, settings.GSA_MIGRATION) - self.assertEqual(findings[1].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) - self.assertEqual(findings[1].OTHERFINDINGS, "Present") - self.assertEqual(findings[1].QCOSTS, settings.GSA_MIGRATION) - self.assertEqual(findings[1].FINDINGREFNUMS, "Present") - - self.assertEqual(findings[2].MODIFIEDOPINION, settings.GSA_MIGRATION) - self.assertEqual(findings[2].OTHERNONCOMPLIANCE, "Present") - self.assertEqual(findings[2].MATERIALWEAKNESS, "Present") - self.assertEqual(findings[2].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) - self.assertEqual(findings[2].OTHERFINDINGS, settings.GSA_MIGRATION) - self.assertEqual(findings[2].QCOSTS, "Present") - self.assertEqual(findings[2].FINDINGREFNUMS, settings.GSA_MIGRATION) - - -class TestXformMissingRepeatPriorReference(SimpleTestCase): - class Finding: - def __init__(self, PRIORFINDINGREFNUMS, REPEATFINDING): - self.PRIORFINDINGREFNUMS = PRIORFINDINGREFNUMS - self.REPEATFINDING = REPEATFINDING - - def test_replace_empty_repeat_finding(self): - findings = [ - self.Finding(PRIORFINDINGREFNUMS="123", REPEATFINDING=""), - self.Finding(PRIORFINDINGREFNUMS="", REPEATFINDING=""), - self.Finding(PRIORFINDINGREFNUMS="456", REPEATFINDING="N"), - self.Finding(PRIORFINDINGREFNUMS="", REPEATFINDING="Y"), - ] - - xform_empty_repeat_prior_reference(findings) - - self.assertEqual(findings[0].REPEATFINDING, "Y") - self.assertEqual(findings[1].REPEATFINDING, "N") - self.assertEqual(findings[2].REPEATFINDING, "N") - self.assertEqual(findings[3].REPEATFINDING, "Y") +from django.conf import settings +from django.test import 
SimpleTestCase + +from census_historical_migration.invalid_migration_tags import INVALID_MIGRATION_TAGS +from census_historical_migration.invalid_record import InvalidRecord + +from .workbooklib.findings_text import ( + xform_add_placeholder_for_missing_text_of_finding, + xform_add_placeholder_for_missing_references, +) + +from .workbooklib.findings import ( + has_duplicate_ref_numbers, + track_invalid_records_with_repeated_ref_numbers, + xform_empty_repeat_prior_reference, + xform_replace_required_fields_with_gsa_migration_when_empty, + xform_sort_compliance_requirement, + xform_missing_compliance_requirement, +) + + +class TestXformSortComplianceRequirement(SimpleTestCase): + class Finding: + def __init__(self, typerequirement): + self.TYPEREQUIREMENT = typerequirement + + def test_sort_and_uppercase(self): + """Test that strings are sorted and uppercased.""" + findings = [self.Finding("abc"), self.Finding(" fca"), self.Finding("Bac")] + expected_results = ["ABC", "ACF", "ABC"] + + xform_sort_compliance_requirement(findings) + + for finding, expected in zip(findings, expected_results): + self.assertEqual(finding.TYPEREQUIREMENT, expected) + + def test_empty_string(self): + """Test that an empty string is not changed.""" + findings = [self.Finding("")] + xform_sort_compliance_requirement(findings) + self.assertEqual(findings[0].TYPEREQUIREMENT, "") + + def test_no_change_needed(self): + """Test that a string that is already sorted and uppercased is not changed.""" + findings = [self.Finding("ABC")] + xform_sort_compliance_requirement(findings) + self.assertEqual(findings[0].TYPEREQUIREMENT, "ABC") + + def test_with_spaces_and_case(self): + """Test that strings with spaces and mixed case are sorted and uppercased.""" + findings = [self.Finding(" aBc "), self.Finding("CBA "), self.Finding("bAc ")] + expected_results = ["ABC", "ABC", "ABC"] + + xform_sort_compliance_requirement(findings) + + for finding, expected in zip(findings, expected_results): + self.assertEqual(finding.TYPEREQUIREMENT, expected) + + +class TestXformAddPlaceholderForMissingFindingsText(SimpleTestCase): + class Findings: + def __init__(self, refnum): + self.FINDINGREFNUMS = refnum + + class FindingsText: + def __init__(self, refnum, seqnum=None, text=None, chartstables=None): + self.FINDINGREFNUMS = refnum + self.SEQ_NUMBER = seqnum + self.TEXT = text + self.CHARTSTABLES = chartstables + + def test_placeholder_addition_for_missing_references(self): + # Setup specific test data + mock_findings = [ + self.Findings("ref1"), + self.Findings("ref2"), + ] + mock_findings_texts = [ + self.FindingsText("ref1", "1", "text1", "chart1"), + ] + + findings_texts = xform_add_placeholder_for_missing_references( + mock_findings, mock_findings_texts + ) + + self.assertEqual( + len(findings_texts), 2 + ) # Expecting two items in findings_texts + self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") + self.assertEqual(findings_texts[1].TEXT, settings.GSA_MIGRATION) + self.assertEqual(findings_texts[1].CHARTSTABLES, settings.GSA_MIGRATION) + + def test_no_placeholder_addition_when_all_references_present(self): + # Setup specific test data + mock_findings = [ + self.Findings("ref1"), + self.Findings("ref2"), + ] + mock_findings_texts = [ + self.FindingsText("ref1", "1", "text1", "chart1"), + self.FindingsText("ref2", "2", "text2", "chart2"), + ] + + findings_texts = xform_add_placeholder_for_missing_references( + mock_findings, mock_findings_texts + ) + + self.assertEqual( + len(findings_texts), 2 + ) # Expecting two items in 
findings_texts + self.assertEqual(findings_texts[0].FINDINGREFNUMS, "ref1") + self.assertEqual(findings_texts[1].FINDINGREFNUMS, "ref2") + + +class TestXformAddPlaceholderForMissingTextOfFinding(SimpleTestCase): + + class FindingsText: + def __init__(self, FINDINGREFNUMS=None, TEXT=None): + self.FINDINGREFNUMS = FINDINGREFNUMS + self.TEXT = TEXT + + def test_add_placeholder_to_empty_text(self): + findings_texts = [self.FindingsText(FINDINGREFNUMS="123", TEXT="")] + expected_text = settings.GSA_MIGRATION + xform_add_placeholder_for_missing_text_of_finding(findings_texts) + self.assertEqual( + findings_texts[0].TEXT, + expected_text, + "The TEXT field should have the placeholder text.", + ) + + def test_no_placeholder_if_text_present(self): + findings_texts = [self.FindingsText(FINDINGREFNUMS="123", TEXT="Existing text")] + expected_text = "Existing text" + xform_add_placeholder_for_missing_text_of_finding(findings_texts) + self.assertEqual( + findings_texts[0].TEXT, + expected_text, + "The TEXT field should not be modified.", + ) + + def test_empty_finding_refnums_no_change(self): + findings_texts = [self.FindingsText(FINDINGREFNUMS="", TEXT="")] + xform_add_placeholder_for_missing_text_of_finding(findings_texts) + self.assertEqual( + findings_texts[0].TEXT, + "", + "The TEXT field should remain empty if FINDINGREFNUMS is empty.", + ) + + +class TestXformMissingComplianceRequirement(SimpleTestCase): + class Findings: + def __init__(self, type_requirement): + self.TYPEREQUIREMENT = type_requirement + + def test_missing_compliance_requirement(self): + mock_findings = [self.Findings("")] + + xform_missing_compliance_requirement(mock_findings) + + self.assertEqual(mock_findings[0].TYPEREQUIREMENT, settings.GSA_MIGRATION) + + def test_normal_compliance_requirement(self): + mock_findings = [self.Findings("ABC")] + + xform_missing_compliance_requirement(mock_findings) + + self.assertEqual(mock_findings[0].TYPEREQUIREMENT, "ABC") + + +class TestHasDuplicateRefNumbers(SimpleTestCase): + + class Finding: + def __init__(self, FINDINGREFNUMS, ELECAUDITSID): + self.FINDINGREFNUMS = FINDINGREFNUMS + self.ELECAUDITSID = ELECAUDITSID + + def test_no_duplicates(self): + """Test that no duplicates are found when there are no duplicate reference numbers.""" + findings = [self.Finding("ref1", "award1"), self.Finding("ref2", "award2")] + award_refs = [finding.ELECAUDITSID for finding in findings] + result = has_duplicate_ref_numbers(award_refs, findings) + self.assertFalse(result) + + def test_with_duplicates(self): + """Test that duplicates are found when there are duplicate reference numbers.""" + findings = [self.Finding("ref1", "award1"), self.Finding("ref1", "award1")] + award_refs = [finding.ELECAUDITSID for finding in findings] + result = has_duplicate_ref_numbers(award_refs, findings) + self.assertTrue(result) + + def test_different_awards(self): + """Test that duplicates are not found when the reference numbers are the same but the awards are different.""" + findings = [self.Finding("ref1", "award1"), self.Finding("ref1", "award2")] + award_refs = [finding.ELECAUDITSID for finding in findings] + result = has_duplicate_ref_numbers(award_refs, findings) + self.assertFalse(result) + + +class TestTrackInvalidRecordsWithRepeatedRefNumbers(SimpleTestCase): + + class Finding: + def __init__(self, FINDINGREFNUMS, ELECAUDITSID): + self.FINDINGREFNUMS = FINDINGREFNUMS + self.ELECAUDITSID = ELECAUDITSID + + def test_track_invalid_records(self): + + InvalidRecord.reset() + findings = [self.Finding("ref1", "id1"), 
self.Finding("ref1", "id1")] + award_refs = [finding.ELECAUDITSID for finding in findings] + track_invalid_records_with_repeated_ref_numbers(award_refs, findings) + + self.assertEqual( + InvalidRecord.fields["finding"], + [ + [ + { + "census_data": [ + {"column": "FINDINGREFNUMS", "value": "ref1"}, + {"column": "ELECAUDITSID", "value": "id1"}, + ], + "gsa_fac_data": {"field": "reference_number", "value": "ref1"}, + }, + { + "census_data": [ + {"column": "FINDINGREFNUMS", "value": "ref1"}, + {"column": "ELECAUDITSID", "value": "id1"}, + ], + "gsa_fac_data": {"field": "reference_number", "value": "ref1"}, + }, + ] + ], + ) + + self.assertEqual( + InvalidRecord.fields["validations_to_skip"], + ["check_finding_reference_uniqueness"], + ) + self.assertEqual( + InvalidRecord.fields["invalid_migration_tag"], + [INVALID_MIGRATION_TAGS.DUPLICATE_FINDING_REFERENCE_NUMBERS], + ) + + def test_no_invalid_records(self): + """Test that no invalid records are tracked when there are no duplicate reference numbers.""" + findings = [self.Finding("ref1", "id1"), self.Finding("ref2", "id2")] + award_refs = [finding.ELECAUDITSID for finding in findings] + InvalidRecord.reset() + + track_invalid_records_with_repeated_ref_numbers(award_refs, findings) + + self.assertEqual(InvalidRecord.fields["finding"], []) + self.assertEqual(InvalidRecord.fields["validations_to_skip"], []) + + +class TestXformReplaceRequiredFields(SimpleTestCase): + class Finding: + + def __init__( + self, + MODIFIEDOPINION, + OTHERNONCOMPLIANCE, + MATERIALWEAKNESS, + SIGNIFICANTDEFICIENCY, + OTHERFINDINGS, + QCOSTS, + FINDINGREFNUMS, + ): + self.MODIFIEDOPINION = MODIFIEDOPINION + self.OTHERNONCOMPLIANCE = OTHERNONCOMPLIANCE + self.MATERIALWEAKNESS = MATERIALWEAKNESS + self.SIGNIFICANTDEFICIENCY = SIGNIFICANTDEFICIENCY + self.OTHERFINDINGS = OTHERFINDINGS + self.QCOSTS = QCOSTS + self.FINDINGREFNUMS = FINDINGREFNUMS + + def test_replace_empty_fields(self): + findings = [ + self.Finding( + MODIFIEDOPINION="", + OTHERNONCOMPLIANCE="", + MATERIALWEAKNESS="Present", + SIGNIFICANTDEFICIENCY="", + OTHERFINDINGS="Present", + QCOSTS="", + FINDINGREFNUMS="", + ), + self.Finding( + MODIFIEDOPINION="Present", + OTHERNONCOMPLIANCE="Present", + MATERIALWEAKNESS="", + SIGNIFICANTDEFICIENCY="", + OTHERFINDINGS="Present", + QCOSTS="", + FINDINGREFNUMS="Present", + ), + self.Finding( + MODIFIEDOPINION="", + OTHERNONCOMPLIANCE="Present", + MATERIALWEAKNESS="Present", + SIGNIFICANTDEFICIENCY="", + OTHERFINDINGS="", + QCOSTS="Present", + FINDINGREFNUMS="", + ), + ] + + xform_replace_required_fields_with_gsa_migration_when_empty(findings) + + self.assertEqual(findings[0].MODIFIEDOPINION, settings.GSA_MIGRATION) + self.assertEqual(findings[0].OTHERNONCOMPLIANCE, settings.GSA_MIGRATION) + self.assertEqual(findings[0].MATERIALWEAKNESS, "Present") + self.assertEqual(findings[0].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) + self.assertEqual(findings[0].OTHERFINDINGS, "Present") + self.assertEqual(findings[0].QCOSTS, settings.GSA_MIGRATION) + self.assertEqual(findings[0].FINDINGREFNUMS, settings.GSA_MIGRATION) + + self.assertEqual(findings[1].MODIFIEDOPINION, "Present") + self.assertEqual(findings[1].OTHERNONCOMPLIANCE, "Present") + self.assertEqual(findings[1].MATERIALWEAKNESS, settings.GSA_MIGRATION) + self.assertEqual(findings[1].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) + self.assertEqual(findings[1].OTHERFINDINGS, "Present") + self.assertEqual(findings[1].QCOSTS, settings.GSA_MIGRATION) + self.assertEqual(findings[1].FINDINGREFNUMS, "Present") + + 
self.assertEqual(findings[2].MODIFIEDOPINION, settings.GSA_MIGRATION) + self.assertEqual(findings[2].OTHERNONCOMPLIANCE, "Present") + self.assertEqual(findings[2].MATERIALWEAKNESS, "Present") + self.assertEqual(findings[2].SIGNIFICANTDEFICIENCY, settings.GSA_MIGRATION) + self.assertEqual(findings[2].OTHERFINDINGS, settings.GSA_MIGRATION) + self.assertEqual(findings[2].QCOSTS, "Present") + self.assertEqual(findings[2].FINDINGREFNUMS, settings.GSA_MIGRATION) + + +class TestXformMissingRepeatPriorReference(SimpleTestCase): + class Finding: + def __init__(self, PRIORFINDINGREFNUMS, REPEATFINDING): + self.PRIORFINDINGREFNUMS = PRIORFINDINGREFNUMS + self.REPEATFINDING = REPEATFINDING + + def test_replace_empty_repeat_finding(self): + findings = [ + self.Finding(PRIORFINDINGREFNUMS="123", REPEATFINDING=""), + self.Finding(PRIORFINDINGREFNUMS="", REPEATFINDING=""), + self.Finding(PRIORFINDINGREFNUMS="456", REPEATFINDING="N"), + self.Finding(PRIORFINDINGREFNUMS="", REPEATFINDING="Y"), + ] + + xform_empty_repeat_prior_reference(findings) + + self.assertEqual(findings[0].REPEATFINDING, "Y") + self.assertEqual(findings[1].REPEATFINDING, "N") + self.assertEqual(findings[2].REPEATFINDING, "N") + self.assertEqual(findings[3].REPEATFINDING, "Y") diff --git a/backend/census_historical_migration/test_general_information_xforms.py b/backend/census_historical_migration/test_general_information_xforms.py index a1084dd1ab..9139888cdc 100644 --- a/backend/census_historical_migration/test_general_information_xforms.py +++ b/backend/census_historical_migration/test_general_information_xforms.py @@ -1,605 +1,605 @@ -from datetime import datetime, timedelta -import json -from unittest.mock import mock_open, patch -from django.conf import settings -from django.test import SimpleTestCase - -from .sac_general_lib.general_information import ( - AUDIT_TYPE_DICT, - PERIOD_DICT, - is_uei_valid, - xform_audit_period_covered, - xform_audit_type, - xform_auditee_fiscal_period_end, - xform_auditee_fiscal_period_start, - xform_country_v2, - xform_entity_type, - xform_replace_empty_auditee_contact_name, - xform_replace_empty_auditee_contact_title, - xform_replace_empty_auditor_email, - xform_replace_empty_auditee_email, - xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration, - xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration, - xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration, - xform_replace_empty_zips, - xform_audit_period_other_months, -) -from .exception_utils import ( - DataMigrationError, - DataMigrationValueError, -) - - -class TestXformEntityType(SimpleTestCase): - def test_valid_phrases(self): - """Test that the function returns the correct results when given valid phrases.""" - self.assertEqual( - xform_entity_type("institution of higher education"), "higher-ed" - ) - self.assertEqual(xform_entity_type("nonprofit"), "non-profit") - self.assertEqual(xform_entity_type("Non-Profit"), "non-profit") - self.assertEqual(xform_entity_type("local government"), "local") - self.assertEqual(xform_entity_type("state"), "state") - self.assertEqual(xform_entity_type("unknown"), "unknown") - self.assertEqual(xform_entity_type("tribe"), "tribal") - self.assertEqual(xform_entity_type("tribal"), "tribal") - - def test_valid_phrases_with_extra_whitespace(self): - """Test that the function returns the correct results when given valid phrases with extra whitespace.""" - self.assertEqual( - xform_entity_type("institution of higher education "), "higher-ed" - ) - self.assertEqual(xform_entity_type(" 
nonprofit "), "non-profit") - self.assertEqual(xform_entity_type("Non-Profit "), "non-profit") - self.assertEqual(xform_entity_type(" local government "), "local") - self.assertEqual(xform_entity_type(" state "), "state") - - def test_valid_phrases_with_extra_whitespace_with_capitalization(self): - """Test that the function returns the correct results when given valid phrases with extra whitespace and upper cases.""" - self.assertEqual( - xform_entity_type("Institution of Higher Education "), "higher-ed" - ) - self.assertEqual(xform_entity_type(" Nonprofit "), "non-profit") - self.assertEqual(xform_entity_type("Non-Profit "), "non-profit") - self.assertEqual(xform_entity_type(" LOCAL Government "), "local") - self.assertEqual(xform_entity_type(" STATE "), "state") - self.assertEqual(xform_entity_type(" Tribal "), "tribal") - - def test_valid_phrases_with_extra_whitespace_with_extra_words_with_capitalization( - self, - ): - """Test that the function returns the correct results when given valid phrases with extra whitespace and extra words.""" - self.assertEqual( - xform_entity_type("Institution of Higher Education (IHE) and Research"), - "higher-ed", - ) - - self.assertEqual( - xform_entity_type(" LOCAL GOVERNMENT AND AGENCIES "), - "local", - ) - self.assertEqual( - xform_entity_type(" STATE and Non-Governmental Organization "), "state" - ) - self.assertEqual( - xform_entity_type(" Indian tribe or Tribal Organization "), "tribal" - ) - - def test_invalid_phrase(self): - """Test that the function raises an exception when given an invalid phrase.""" - with self.assertRaises(DataMigrationError): - xform_entity_type("business") - - def test_empty_string(self): - """Test that the function raises an exception when given an empty string.""" - with self.assertRaises(DataMigrationError): - xform_entity_type("") - - def test_none_input(self): - """Test that the function raises an exception when given None.""" - with self.assertRaises(DataMigrationError): - xform_entity_type(None) - - -class TestXformCountry(SimpleTestCase): - class MockAuditHeader: - def __init__(self, CPASTATE): - self.CPASTATE = CPASTATE - self.CPASTREET1 = "" - self.CPACITY = "" - self.CPAZIPCODE = "" - self.CPAFOREIGN = "" - - def setUp(self): - self.general_information = { - "auditor_country": "", - } - self.audit_header = self.MockAuditHeader("") - - def test_when_auditor_country_set_to_us(self): - """Test that the function returns the correct results when the auditor country is set to US.""" - self.general_information["auditor_country"] = "US" - result = xform_country_v2(self.general_information, self.audit_header) - self.assertEqual(result["auditor_country"], "USA") - - def test_when_auditor_country_set_to_usa(self): - """Test that the function returns the correct results when the auditor country is set to USA.""" - self.general_information["auditor_country"] = "USA" - result = xform_country_v2(self.general_information, self.audit_header) - self.assertEqual(result["auditor_country"], "USA") - - def test_when_auditor_country_set_to_empty_string_and_auditor_state_valid(self): - """Test that the function returns the correct results when the auditor country is set to an empty string.""" - self.general_information["auditor_country"] = "" - self.audit_header.CPASTATE = "MA" - result = xform_country_v2(self.general_information, self.audit_header) - self.assertEqual(result["auditor_country"], "USA") - - def test_when_auditor_country_set_to_empty_string_and_auditor_state_invalid(self): - """Test that the function raises an exception 
when the auditor country is set to an empty string and the auditor state is invalid.""" - self.general_information["auditor_country"] = "" - self.audit_header.CPASTATE = "XX" - with self.assertRaises(DataMigrationError): - xform_country_v2(self.general_information, self.audit_header) - - def test_when_auditor_country_set_to_non_us(self): - """Test that the function returns the correct results when the auditor country is set to NON-US.""" - self.general_information["auditor_country"] = "NON-US" - self.audit_header.CPAFOREIGN = "Some foreign address" - result = xform_country_v2(self.general_information, self.audit_header) - self.assertEqual(result["auditor_country"], "non-USA") - self.assertEqual( - result["auditor_international_address"], "Some foreign address" - ) - - def test_when_auditor_country_set_to_non_us_and_state_set(self): - """Test that the function raises an exception when the auditor country is NON-US and the auditor state is set.""" - self.general_information["auditor_country"] = "NON-US" - self.audit_header.CPASTATE = "MA" - with self.assertRaises(DataMigrationError): - xform_country_v2(self.general_information, self.audit_header) - - -class TestXformAuditeeFiscalPeriodEnd(SimpleTestCase): - def setUp(self): - self.general_information = { - "auditee_fiscal_period_end": "01/31/2021 00:00:00", - } - - def test_when_auditee_fiscal_period_end_is_valid(self): - """Test that the function returns the correct results when the fiscal period end is valid.""" - result = xform_auditee_fiscal_period_end(self.general_information) - self.assertEqual(result["auditee_fiscal_period_end"], "2021-01-31") - - def test_when_auditee_fiscal_period_end_is_invalid(self): - """Test that the function raises an exception when the fiscal period end is invalid.""" - self.general_information["auditee_fiscal_period_end"] = "01/31/2021" - with self.assertRaises(DataMigrationValueError): - xform_auditee_fiscal_period_end(self.general_information) - self.general_information["auditee_fiscal_period_end"] = "" - with self.assertRaises(DataMigrationError): - xform_auditee_fiscal_period_end(self.general_information) - - -class TestXformAuditeeFiscalPeriodStart(SimpleTestCase): - def setUp(self): - self.general_information = { - "auditee_fiscal_period_end": "01/31/2021 00:00:00", - } - - def test_when_auditee_fiscal_period_end_is_valid(self): - """Test that the function returns the correct results when the fiscal period end is valid.""" - result = xform_auditee_fiscal_period_start(self.general_information) - expected_date = ( - datetime.strptime( - self.general_information["auditee_fiscal_period_end"], - "%m/%d/%Y %H:%M:%S", - ) - - timedelta(days=365) - ).strftime("%Y-%m-%d") - self.assertEqual(result["auditee_fiscal_period_start"], expected_date) - - def test_when_auditee_fiscal_period_end_is_invalid(self): - """Test that the function raises an exception when the fiscal period end is invalid.""" - self.general_information["auditee_fiscal_period_end"] = "01/31/2021" - with self.assertRaises(DataMigrationValueError): - xform_auditee_fiscal_period_start(self.general_information) - self.general_information["auditee_fiscal_period_end"] = "" - with self.assertRaises(DataMigrationValueError): - xform_auditee_fiscal_period_start(self.general_information) - - -class TestXformAuditPeriodCovered(SimpleTestCase): - def test_valid_period(self): - for key, value in PERIOD_DICT.items(): - with self.subTest(key=key): - general_information = {"audit_period_covered": key} - result = xform_audit_period_covered(general_information) - 
self.assertEqual(result["audit_period_covered"], value) - - def test_invalid_period(self): - general_information = {"audit_period_covered": "invalid_key"} - with self.assertRaises(DataMigrationError): - xform_audit_period_covered(general_information) - - def test_missing_period(self): - general_information = {} - with self.assertRaises(DataMigrationError): - xform_audit_period_covered(general_information) - - -class TestXformAuditType(SimpleTestCase): - def test_valid_audit_type(self): - for key, value in AUDIT_TYPE_DICT.items(): - if value != "alternative-compliance-engagement": - with self.subTest(key=key): - general_information = {"audit_type": key} - result = xform_audit_type(general_information) - self.assertEqual(result["audit_type"], value) - - def test_invalid_audit_type(self): - general_information = {"audit_type": "invalid_key"} - with self.assertRaises(DataMigrationError): - xform_audit_type(general_information) - - def test_ace_audit_type(self): - # audit type "alternative-compliance-engagement" is not supported at this time. - general_information = {"audit_type": AUDIT_TYPE_DICT["A"]} - with self.assertRaises(DataMigrationError): - xform_audit_type(general_information) - - def test_missing_audit_type(self): - general_information = {} - with self.assertRaises(DataMigrationError): - xform_audit_type(general_information) - - -class TestXformReplaceEmptyAuditorEmail(SimpleTestCase): - def test_empty_auditor_email(self): - """Test that an empty auditor_email is replaced with 'GSA_MIGRATION'""" - input_data = {"auditor_email": ""} - expected_output = {"auditor_email": settings.GSA_MIGRATION} - self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) - - def test_non_empty_auditor_email(self): - """Test that a non-empty auditor_email remains unchanged""" - input_data = {"auditor_email": "test@example.com"} - expected_output = {"auditor_email": "test@example.com"} - self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) - - def test_missing_auditor_email(self): - """Test that a missing auditor_email key is added and set to 'GSA_MIGRATION'""" - input_data = {} - expected_output = {"auditor_email": settings.GSA_MIGRATION} - self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) - - -class TestXformReplaceEmptyAuditeeEmail(SimpleTestCase): - def test_empty_auditee_email(self): - """Test that an empty auditee_email is replaced with 'GSA_MIGRATION'""" - input_data = {"auditee_email": ""} - expected_output = {"auditee_email": settings.GSA_MIGRATION} - self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) - - def test_non_empty_auditee_email(self): - """Test that a non-empty auditee_email remains unchanged""" - input_data = {"auditee_email": "test@example.com"} - expected_output = {"auditee_email": "test@example.com"} - self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) - - def test_missing_auditee_email(self): - """Test that a missing auditee_email key is added and set to 'GSA_MIGRATION'""" - input_data = {} - expected_output = {"auditee_email": settings.GSA_MIGRATION} - self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) - - -class TestXformReplaceEmptyAuditeeContactName(SimpleTestCase): - def test_empty_auditee_contact_name(self): - """Test that an empty auditee_contact_name is replaced with 'GSA_MIGRATION'""" - input_data = {"auditee_contact_name": ""} - expected_output = {"auditee_contact_name": settings.GSA_MIGRATION} - 
self.assertEqual( - xform_replace_empty_auditee_contact_name(input_data), expected_output - ) - - def test_non_empty_auditee_contact_name(self): - """Test that a non-empty auditee_contact_name remains unchanged""" - input_data = {"auditee_contact_name": "test"} - expected_output = {"auditee_contact_name": "test"} - self.assertEqual( - xform_replace_empty_auditee_contact_name(input_data), expected_output - ) - - def test_missing_auditee_contact_name(self): - """Test that a missing auditee_contact_name key is added and set to 'GSA_MIGRATION'""" - input_data = {} - expected_output = {"auditee_contact_name": settings.GSA_MIGRATION} - self.assertEqual( - xform_replace_empty_auditee_contact_name(input_data), expected_output - ) - - -class TestXformReplaceEmptyAuditeeContactTitle(SimpleTestCase): - def test_empty_auditee_contact_title(self): - """Test that an empty auditee_contact_title is replaced with 'GSA_MIGRATION'""" - input_data = {"auditee_contact_title": ""} - expected_output = {"auditee_contact_title": settings.GSA_MIGRATION} - self.assertEqual( - xform_replace_empty_auditee_contact_title(input_data), expected_output - ) - - def test_non_empty_auditee_contact_title(self): - """Test that a non-empty auditee_contact_title remains unchanged""" - input_data = {"auditee_contact_title": "test"} - expected_output = {"auditee_contact_title": "test"} - self.assertEqual( - xform_replace_empty_auditee_contact_title(input_data), expected_output - ) - - def test_missing_auditee_contact_title(self): - """Test that a missing auditee_contact_title key is added and set to 'GSA_MIGRATION'""" - input_data = {} - expected_output = {"auditee_contact_title": settings.GSA_MIGRATION} - self.assertEqual( - xform_replace_empty_auditee_contact_title(input_data), expected_output - ) - - -class TestXformReplaceEmptyOrInvalidUEIs(SimpleTestCase): - class MockAuditHeader: - def __init__(self, UEI): - self.UEI = UEI - - def setUp(self): - self.audit_header = self.MockAuditHeader("") - self.valid_uei = "ZQGGHJH74DW7" - self.invalid_uei = "123" - self.uei_schema = { - "oneOf": [ - { - "allOf": [ - {"maxLength": 12, "minLength": 12}, - {"pattern": "^[A-HJ-NP-Z1-9][A-HJ-NP-Z0-9]+$"}, - { - "pattern": "^(?![A-HJ-NP-Z1-9]+[A-HJ-NP-Z0-9]*?[0-9]{9})[A-HJ-NP-Z0-9]*$" - }, - {"pattern": "^(?![0-9]{9})"}, - ], - "type": "string", - }, - {"const": "GSA_MIGRATION", "type": "string"}, - ] - } - - def test_auditee_uei_valid(self): - """Test that valid auditee EIN is not replaced.""" - self.audit_header.UEI = "ZQGGHJH74DW7" - result = xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( - self.audit_header - ) - print(result) - self.assertEqual(result.UEI, "ZQGGHJH74DW7") - - def test_auditee_uei_invalid_replaced(self): - """Test that invalid auditee UEI is replaced.""" - self.audit_header.UEI = "invalid_uei" - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as mock_track: - result = xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( - self.audit_header - ) - mock_track.assert_called_once_with( - "UEI", - "invalid_uei", - "auditee_uei", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration", - ) - self.assertEqual(result.UEI, settings.GSA_MIGRATION) - - def test_auditee_uei_empty_replaced(self): - """Test that empty auditee UEI is replaced.""" - self.audit_header.UEI = "auditee_uei" - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as mock_track: - result = 
xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( - self.audit_header - ) - mock_track.assert_called_once() - self.assertEqual(result.UEI, settings.GSA_MIGRATION) - - @patch("builtins.open", side_effect=FileNotFoundError) - @patch( - "census_historical_migration.sac_general_lib.general_information.settings.OUTPUT_BASE_DIR", - "some/dir", - ) - def test_missing_schema_file(self, mock_open): - with self.assertRaises(DataMigrationError) as context: - is_uei_valid(self.valid_uei) - self.assertIn( - "UeiSchema.json file not found in some/dir", str(context.exception) - ) - - @patch("builtins.open", new_callable=mock_open, read_data="invalid json") - @patch( - "json.load", - side_effect=json.decoder.JSONDecodeError( - "Expecting value", "line 1 column 1 (char 0)", 0 - ), - ) - @patch( - "census_historical_migration.sac_general_lib.general_information.settings.OUTPUT_BASE_DIR", - "some/dir", - ) - def test_invalid_json_schema_file(self, mock_json_load, mock_open): - with self.assertRaises(DataMigrationError) as context: - is_uei_valid(self.valid_uei) - self.assertIn( - "UeiSchema.json file contains invalid JSON", str(context.exception) - ) - - -class TestXformReplaceEmptyOrInvalidEins(SimpleTestCase): - def test_auditor_ein_valid(self): - """Test that valid auditor EIN is not replaced.""" - info = {"auditor_ein": "123456789"} - result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) - self.assertEqual(result["auditor_ein"], "123456789") - - def test_auditor_ein_invalid_replaced(self): - """Test that invalid auditor EIN is replaced.""" - info = {"auditor_ein": "invalid_ein"} - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as mock_track: - result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) - mock_track.assert_called_once_with( - "AUDITOR_EIN", - "invalid_ein", - "auditor_ein", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration", - ) - self.assertEqual(result["auditor_ein"], settings.GSA_MIGRATION) - - def test_auditor_ein_empty_replaced(self): - """Test that empty auditor EIN is replaced.""" - info = {"auditor_ein": ""} - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as mock_track: - result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) - mock_track.assert_called_once() - self.assertEqual(result["auditor_ein"], settings.GSA_MIGRATION) - - def test_auditee_ein_valid(self): - """Test that valid auditee EIN is not replaced.""" - info = {"ein": "123456789"} - result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) - self.assertEqual(result["ein"], "123456789") - - def test_auditee_ein_invalid_replaced(self): - """Test that invalid auditee EIN is replaced.""" - info = {"ein": "invalid_ein"} - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as mock_track: - result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) - mock_track.assert_called_once_with( - "EIN", - "invalid_ein", - "auditee_ein", - settings.GSA_MIGRATION, - "xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration", - ) - self.assertEqual(result["ein"], settings.GSA_MIGRATION) - - def test_auditee_ein_empty_replaced(self): - """Test that empty auditee EIN is replaced.""" - info = {"ein": ""} - with patch( - "census_historical_migration.sac_general_lib.general_information.track_transformations" - ) as 
mock_track: - result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) - mock_track.assert_called_once() - self.assertEqual(result["ein"], settings.GSA_MIGRATION) - - -class TestXformReplaceEmptyAuditorZip(SimpleTestCase): - def test_empty_auditor_zip(self): - """Test that an empty US auditor_zip and auditee_zip are replaced with 'GSA_MIGRATION'""" - input_data = { - "auditee_zip": "", - "auditor_country": "USA", - "auditor_zip": "", - } - expected_output = { - "auditee_zip": settings.GSA_MIGRATION, - "auditor_country": "USA", - "auditor_zip": settings.GSA_MIGRATION, - } - self.assertEqual(xform_replace_empty_zips(input_data), expected_output) - - def test_empty_non_us_auditor_zip(self): - """Test that an empty non-US auditor_zip is not replaced with 'GSA_MIGRATION'""" - input_data = { - "auditee_zip": "", - "auditor_country": "non-USA", - "auditor_zip": "", - } - expected_output = { - "auditee_zip": settings.GSA_MIGRATION, - "auditor_country": "non-USA", - "auditor_zip": "", - } - self.assertEqual(xform_replace_empty_zips(input_data), expected_output) - - def test_non_empty_auditor_zip(self): - """Test that a non-empty US auditor_zip and auditee_zip remain unchanged""" - input_data = { - "auditee_zip": "10108", - "auditor_country": "USA", - "auditor_zip": "12345", - } - self.assertEqual(xform_replace_empty_zips(input_data), input_data) - - -class TestXformAuditPeriodOtherMonths(SimpleTestCase): - class MockAuditHeader: - def __init__(self, PERIODCOVERED, NUMBERMONTHS): - self.PERIODCOVERED = PERIODCOVERED - self.NUMBERMONTHS = NUMBERMONTHS - - def setUp(self): - self.audit_header = self.MockAuditHeader("", "") - - def test_periodcovered_other_zfill(self): - """Test that audit_period_other_months is set to NUMBERMONTHS with padded zeroes""" - self.audit_header.PERIODCOVERED = "O" - self.audit_header.NUMBERMONTHS = "6" - general_information = { - "audit_period_other_months": "", - } - expected_output = { - "audit_period_other_months": "06", - } - xform_audit_period_other_months(general_information, self.audit_header) - self.assertEqual( - general_information, - expected_output, - ) - - def test_periodcovered_other_no_zfill(self): - """Test that audit_period_other_months is set to NUMBERMONTHS without padded zeroes""" - self.audit_header.PERIODCOVERED = "O" - self.audit_header.NUMBERMONTHS = "14" - general_information = { - "audit_period_other_months": "", - } - expected_output = { - "audit_period_other_months": "14", - } - xform_audit_period_other_months(general_information, self.audit_header) - self.assertEqual( - general_information, - expected_output, - ) - - def test_periodcovered_not_other(self): - """Test that audit_period_other_months is not set""" - self.audit_header.PERIODCOVERED = "A" - self.audit_header.NUMBERMONTHS = "10" - general_information = { - "audit_period_other_months": "", - } - expected_output = { - "audit_period_other_months": "", - } - xform_audit_period_other_months(general_information, self.audit_header) - self.assertEqual( - general_information, - expected_output, - ) +from datetime import datetime, timedelta +import json +from unittest.mock import mock_open, patch +from django.conf import settings +from django.test import SimpleTestCase + +from .sac_general_lib.general_information import ( + AUDIT_TYPE_DICT, + PERIOD_DICT, + is_uei_valid, + xform_audit_period_covered, + xform_audit_type, + xform_auditee_fiscal_period_end, + xform_auditee_fiscal_period_start, + xform_country_v2, + xform_entity_type, + xform_replace_empty_auditee_contact_name, + 
xform_replace_empty_auditee_contact_title, + xform_replace_empty_auditor_email, + xform_replace_empty_auditee_email, + xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration, + xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration, + xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration, + xform_replace_empty_zips, + xform_audit_period_other_months, +) +from .exception_utils import ( + DataMigrationError, + DataMigrationValueError, +) + + +class TestXformEntityType(SimpleTestCase): + def test_valid_phrases(self): + """Test that the function returns the correct results when given valid phrases.""" + self.assertEqual( + xform_entity_type("institution of higher education"), "higher-ed" + ) + self.assertEqual(xform_entity_type("nonprofit"), "non-profit") + self.assertEqual(xform_entity_type("Non-Profit"), "non-profit") + self.assertEqual(xform_entity_type("local government"), "local") + self.assertEqual(xform_entity_type("state"), "state") + self.assertEqual(xform_entity_type("unknown"), "unknown") + self.assertEqual(xform_entity_type("tribe"), "tribal") + self.assertEqual(xform_entity_type("tribal"), "tribal") + + def test_valid_phrases_with_extra_whitespace(self): + """Test that the function returns the correct results when given valid phrases with extra whitespace.""" + self.assertEqual( + xform_entity_type("institution of higher education "), "higher-ed" + ) + self.assertEqual(xform_entity_type(" nonprofit "), "non-profit") + self.assertEqual(xform_entity_type("Non-Profit "), "non-profit") + self.assertEqual(xform_entity_type(" local government "), "local") + self.assertEqual(xform_entity_type(" state "), "state") + + def test_valid_phrases_with_extra_whitespace_with_capitalization(self): + """Test that the function returns the correct results when given valid phrases with extra whitespace and upper cases.""" + self.assertEqual( + xform_entity_type("Institution of Higher Education "), "higher-ed" + ) + self.assertEqual(xform_entity_type(" Nonprofit "), "non-profit") + self.assertEqual(xform_entity_type("Non-Profit "), "non-profit") + self.assertEqual(xform_entity_type(" LOCAL Government "), "local") + self.assertEqual(xform_entity_type(" STATE "), "state") + self.assertEqual(xform_entity_type(" Tribal "), "tribal") + + def test_valid_phrases_with_extra_whitespace_with_extra_words_with_capitalization( + self, + ): + """Test that the function returns the correct results when given valid phrases with extra whitespace and extra words.""" + self.assertEqual( + xform_entity_type("Institution of Higher Education (IHE) and Research"), + "higher-ed", + ) + + self.assertEqual( + xform_entity_type(" LOCAL GOVERNMENT AND AGENCIES "), + "local", + ) + self.assertEqual( + xform_entity_type(" STATE and Non-Governmental Organization "), "state" + ) + self.assertEqual( + xform_entity_type(" Indian tribe or Tribal Organization "), "tribal" + ) + + def test_invalid_phrase(self): + """Test that the function raises an exception when given an invalid phrase.""" + with self.assertRaises(DataMigrationError): + xform_entity_type("business") + + def test_empty_string(self): + """Test that the function raises an exception when given an empty string.""" + with self.assertRaises(DataMigrationError): + xform_entity_type("") + + def test_none_input(self): + """Test that the function raises an exception when given None.""" + with self.assertRaises(DataMigrationError): + xform_entity_type(None) + + +class TestXformCountry(SimpleTestCase): + class MockAuditHeader: + def __init__(self, CPASTATE): + 
self.CPASTATE = CPASTATE + self.CPASTREET1 = "" + self.CPACITY = "" + self.CPAZIPCODE = "" + self.CPAFOREIGN = "" + + def setUp(self): + self.general_information = { + "auditor_country": "", + } + self.audit_header = self.MockAuditHeader("") + + def test_when_auditor_country_set_to_us(self): + """Test that the function returns the correct results when the auditor country is set to US.""" + self.general_information["auditor_country"] = "US" + result = xform_country_v2(self.general_information, self.audit_header) + self.assertEqual(result["auditor_country"], "USA") + + def test_when_auditor_country_set_to_usa(self): + """Test that the function returns the correct results when the auditor country is set to USA.""" + self.general_information["auditor_country"] = "USA" + result = xform_country_v2(self.general_information, self.audit_header) + self.assertEqual(result["auditor_country"], "USA") + + def test_when_auditor_country_set_to_empty_string_and_auditor_state_valid(self): + """Test that the function returns the correct results when the auditor country is set to an empty string.""" + self.general_information["auditor_country"] = "" + self.audit_header.CPASTATE = "MA" + result = xform_country_v2(self.general_information, self.audit_header) + self.assertEqual(result["auditor_country"], "USA") + + def test_when_auditor_country_set_to_empty_string_and_auditor_state_invalid(self): + """Test that the function raises an exception when the auditor country is set to an empty string and the auditor state is invalid.""" + self.general_information["auditor_country"] = "" + self.audit_header.CPASTATE = "XX" + with self.assertRaises(DataMigrationError): + xform_country_v2(self.general_information, self.audit_header) + + def test_when_auditor_country_set_to_non_us(self): + """Test that the function returns the correct results when the auditor country is set to NON-US.""" + self.general_information["auditor_country"] = "NON-US" + self.audit_header.CPAFOREIGN = "Some foreign address" + result = xform_country_v2(self.general_information, self.audit_header) + self.assertEqual(result["auditor_country"], "non-USA") + self.assertEqual( + result["auditor_international_address"], "Some foreign address" + ) + + def test_when_auditor_country_set_to_non_us_and_state_set(self): + """Test that the function raises an exception when the auditor country is NON-US and the auditor state is set.""" + self.general_information["auditor_country"] = "NON-US" + self.audit_header.CPASTATE = "MA" + with self.assertRaises(DataMigrationError): + xform_country_v2(self.general_information, self.audit_header) + + +class TestXformAuditeeFiscalPeriodEnd(SimpleTestCase): + def setUp(self): + self.general_information = { + "auditee_fiscal_period_end": "01/31/2021 00:00:00", + } + + def test_when_auditee_fiscal_period_end_is_valid(self): + """Test that the function returns the correct results when the fiscal period end is valid.""" + result = xform_auditee_fiscal_period_end(self.general_information) + self.assertEqual(result["auditee_fiscal_period_end"], "2021-01-31") + + def test_when_auditee_fiscal_period_end_is_invalid(self): + """Test that the function raises an exception when the fiscal period end is invalid.""" + self.general_information["auditee_fiscal_period_end"] = "01/31/2021" + with self.assertRaises(DataMigrationValueError): + xform_auditee_fiscal_period_end(self.general_information) + self.general_information["auditee_fiscal_period_end"] = "" + with self.assertRaises(DataMigrationError): + 
xform_auditee_fiscal_period_end(self.general_information) + + +class TestXformAuditeeFiscalPeriodStart(SimpleTestCase): + def setUp(self): + self.general_information = { + "auditee_fiscal_period_end": "01/31/2021 00:00:00", + } + + def test_when_auditee_fiscal_period_end_is_valid(self): + """Test that the function returns the correct results when the fiscal period end is valid.""" + result = xform_auditee_fiscal_period_start(self.general_information) + expected_date = ( + datetime.strptime( + self.general_information["auditee_fiscal_period_end"], + "%m/%d/%Y %H:%M:%S", + ) + - timedelta(days=365) + ).strftime("%Y-%m-%d") + self.assertEqual(result["auditee_fiscal_period_start"], expected_date) + + def test_when_auditee_fiscal_period_end_is_invalid(self): + """Test that the function raises an exception when the fiscal period end is invalid.""" + self.general_information["auditee_fiscal_period_end"] = "01/31/2021" + with self.assertRaises(DataMigrationValueError): + xform_auditee_fiscal_period_start(self.general_information) + self.general_information["auditee_fiscal_period_end"] = "" + with self.assertRaises(DataMigrationValueError): + xform_auditee_fiscal_period_start(self.general_information) + + +class TestXformAuditPeriodCovered(SimpleTestCase): + def test_valid_period(self): + for key, value in PERIOD_DICT.items(): + with self.subTest(key=key): + general_information = {"audit_period_covered": key} + result = xform_audit_period_covered(general_information) + self.assertEqual(result["audit_period_covered"], value) + + def test_invalid_period(self): + general_information = {"audit_period_covered": "invalid_key"} + with self.assertRaises(DataMigrationError): + xform_audit_period_covered(general_information) + + def test_missing_period(self): + general_information = {} + with self.assertRaises(DataMigrationError): + xform_audit_period_covered(general_information) + + +class TestXformAuditType(SimpleTestCase): + def test_valid_audit_type(self): + for key, value in AUDIT_TYPE_DICT.items(): + if value != "alternative-compliance-engagement": + with self.subTest(key=key): + general_information = {"audit_type": key} + result = xform_audit_type(general_information) + self.assertEqual(result["audit_type"], value) + + def test_invalid_audit_type(self): + general_information = {"audit_type": "invalid_key"} + with self.assertRaises(DataMigrationError): + xform_audit_type(general_information) + + def test_ace_audit_type(self): + # audit type "alternative-compliance-engagement" is not supported at this time. 
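The fiscal-period tests in this hunk pin down the date handling exactly: Census values arrive as `MM/DD/YYYY HH:MM:SS` strings, the FAC stores `YYYY-MM-DD`, and the period start is derived by subtracting 365 days from the period end. A minimal runnable sketch of that arithmetic (the real xforms in `sac_general_lib/general_information.py` add `DataMigrationError`/`DataMigrationValueError` handling on top):

```python
from datetime import datetime, timedelta

CENSUS_FORMAT = "%m/%d/%Y %H:%M:%S"  # e.g. "01/31/2021 00:00:00"

def fiscal_period_end_to_iso(value):
    # Convert a Census timestamp to the YYYY-MM-DD form the FAC stores.
    return datetime.strptime(value, CENSUS_FORMAT).strftime("%Y-%m-%d")

def derive_fiscal_period_start(end_value):
    # The migration assumes a 365-day audit period, per the test above.
    end = datetime.strptime(end_value, CENSUS_FORMAT)
    return (end - timedelta(days=365)).strftime("%Y-%m-%d")

assert fiscal_period_end_to_iso("01/31/2021 00:00:00") == "2021-01-31"
assert derive_fiscal_period_start("01/31/2021 00:00:00") == "2020-02-01"
```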
+ general_information = {"audit_type": AUDIT_TYPE_DICT["A"]} + with self.assertRaises(DataMigrationError): + xform_audit_type(general_information) + + def test_missing_audit_type(self): + general_information = {} + with self.assertRaises(DataMigrationError): + xform_audit_type(general_information) + + +class TestXformReplaceEmptyAuditorEmail(SimpleTestCase): + def test_empty_auditor_email(self): + """Test that an empty auditor_email is replaced with 'GSA_MIGRATION'""" + input_data = {"auditor_email": ""} + expected_output = {"auditor_email": settings.GSA_MIGRATION} + self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) + + def test_non_empty_auditor_email(self): + """Test that a non-empty auditor_email remains unchanged""" + input_data = {"auditor_email": "test@example.com"} + expected_output = {"auditor_email": "test@example.com"} + self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) + + def test_missing_auditor_email(self): + """Test that a missing auditor_email key is added and set to 'GSA_MIGRATION'""" + input_data = {} + expected_output = {"auditor_email": settings.GSA_MIGRATION} + self.assertEqual(xform_replace_empty_auditor_email(input_data), expected_output) + + +class TestXformReplaceEmptyAuditeeEmail(SimpleTestCase): + def test_empty_auditee_email(self): + """Test that an empty auditee_email is replaced with 'GSA_MIGRATION'""" + input_data = {"auditee_email": ""} + expected_output = {"auditee_email": settings.GSA_MIGRATION} + self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) + + def test_non_empty_auditee_email(self): + """Test that a non-empty auditee_email remains unchanged""" + input_data = {"auditee_email": "test@example.com"} + expected_output = {"auditee_email": "test@example.com"} + self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) + + def test_missing_auditee_email(self): + """Test that a missing auditee_email key is added and set to 'GSA_MIGRATION'""" + input_data = {} + expected_output = {"auditee_email": settings.GSA_MIGRATION} + self.assertEqual(xform_replace_empty_auditee_email(input_data), expected_output) + + +class TestXformReplaceEmptyAuditeeContactName(SimpleTestCase): + def test_empty_auditee_contact_name(self): + """Test that an empty auditee_contact_name is replaced with 'GSA_MIGRATION'""" + input_data = {"auditee_contact_name": ""} + expected_output = {"auditee_contact_name": settings.GSA_MIGRATION} + self.assertEqual( + xform_replace_empty_auditee_contact_name(input_data), expected_output + ) + + def test_non_empty_auditee_contact_name(self): + """Test that a non-empty auditee_contact_name remains unchanged""" + input_data = {"auditee_contact_name": "test"} + expected_output = {"auditee_contact_name": "test"} + self.assertEqual( + xform_replace_empty_auditee_contact_name(input_data), expected_output + ) + + def test_missing_auditee_contact_name(self): + """Test that a missing auditee_contact_name key is added and set to 'GSA_MIGRATION'""" + input_data = {} + expected_output = {"auditee_contact_name": settings.GSA_MIGRATION} + self.assertEqual( + xform_replace_empty_auditee_contact_name(input_data), expected_output + ) + + +class TestXformReplaceEmptyAuditeeContactTitle(SimpleTestCase): + def test_empty_auditee_contact_title(self): + """Test that an empty auditee_contact_title is replaced with 'GSA_MIGRATION'""" + input_data = {"auditee_contact_title": ""} + expected_output = {"auditee_contact_title": settings.GSA_MIGRATION} + 
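Every `xform_replace_empty_*` class in this hunk asserts the same three-way contract: an empty value is replaced with the `settings.GSA_MIGRATION` sentinel, a non-empty value passes through untouched, and a missing key is added with the sentinel. A hedged sketch of that shared shape (the helper name is illustrative; the real module defines one function per field):

```python
from django.conf import settings  # assumes settings.GSA_MIGRATION is defined

def replace_empty_field(general_information, key):
    # Backfill a blank or absent value with the GSA_MIGRATION sentinel;
    # dict.get() covers both the empty-string and missing-key cases.
    if not general_information.get(key):
        general_information[key] = settings.GSA_MIGRATION
    return general_information
```

The three cases each test class checks fall out directly: `{"auditor_email": ""}` and `{}` both end up holding the sentinel, while `{"auditor_email": "test@example.com"}` is returned unchanged.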
self.assertEqual( + xform_replace_empty_auditee_contact_title(input_data), expected_output + ) + + def test_non_empty_auditee_contact_title(self): + """Test that a non-empty auditee_contact_title remains unchanged""" + input_data = {"auditee_contact_title": "test"} + expected_output = {"auditee_contact_title": "test"} + self.assertEqual( + xform_replace_empty_auditee_contact_title(input_data), expected_output + ) + + def test_missing_auditee_contact_title(self): + """Test that a missing auditee_contact_title key is added and set to 'GSA_MIGRATION'""" + input_data = {} + expected_output = {"auditee_contact_title": settings.GSA_MIGRATION} + self.assertEqual( + xform_replace_empty_auditee_contact_title(input_data), expected_output + ) + + +class TestXformReplaceEmptyOrInvalidUEIs(SimpleTestCase): + class MockAuditHeader: + def __init__(self, UEI): + self.UEI = UEI + + def setUp(self): + self.audit_header = self.MockAuditHeader("") + self.valid_uei = "ZQGGHJH74DW7" + self.invalid_uei = "123" + self.uei_schema = { + "oneOf": [ + { + "allOf": [ + {"maxLength": 12, "minLength": 12}, + {"pattern": "^[A-HJ-NP-Z1-9][A-HJ-NP-Z0-9]+$"}, + { + "pattern": "^(?![A-HJ-NP-Z1-9]+[A-HJ-NP-Z0-9]*?[0-9]{9})[A-HJ-NP-Z0-9]*$" + }, + {"pattern": "^(?![0-9]{9})"}, + ], + "type": "string", + }, + {"const": "GSA_MIGRATION", "type": "string"}, + ] + } + + def test_auditee_uei_valid(self): + """Test that a valid auditee UEI is not replaced.""" + self.audit_header.UEI = "ZQGGHJH74DW7" + result = xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( + self.audit_header + ) + self.assertEqual(result.UEI, "ZQGGHJH74DW7") + + def test_auditee_uei_invalid_replaced(self): + """Test that an invalid auditee UEI is replaced.""" + self.audit_header.UEI = "invalid_uei" + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( + self.audit_header + ) + mock_track.assert_called_once_with( + "UEI", + "invalid_uei", + "auditee_uei", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration", + ) + self.assertEqual(result.UEI, settings.GSA_MIGRATION) + + def test_auditee_uei_empty_replaced(self): + """Test that an empty auditee UEI is replaced.""" + self.audit_header.UEI = "" + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditee_uei_with_gsa_migration( + self.audit_header + ) + mock_track.assert_called_once() + self.assertEqual(result.UEI, settings.GSA_MIGRATION) + + @patch("builtins.open", side_effect=FileNotFoundError) + @patch( + "census_historical_migration.sac_general_lib.general_information.settings.OUTPUT_BASE_DIR", + "some/dir", + ) + def test_missing_schema_file(self, mock_open): + with self.assertRaises(DataMigrationError) as context: + is_uei_valid(self.valid_uei) + self.assertIn( + "UeiSchema.json file not found in some/dir", str(context.exception) + ) + + @patch("builtins.open", new_callable=mock_open, read_data="invalid json") + @patch( + "json.load", + side_effect=json.decoder.JSONDecodeError( + "Expecting value", "line 1 column 1 (char 0)", 0 + ), + ) + @patch( + "census_historical_migration.sac_general_lib.general_information.settings.OUTPUT_BASE_DIR", + "some/dir", + ) + def test_invalid_json_schema_file(self, mock_json_load, mock_open): + with self.assertRaises(DataMigrationError) as context: +
is_uei_valid(self.valid_uei) + self.assertIn( + "UeiSchema.json file contains invalid JSON", str(context.exception) + ) + + +class TestXformReplaceEmptyOrInvalidEins(SimpleTestCase): + def test_auditor_ein_valid(self): + """Test that valid auditor EIN is not replaced.""" + info = {"auditor_ein": "123456789"} + result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) + self.assertEqual(result["auditor_ein"], "123456789") + + def test_auditor_ein_invalid_replaced(self): + """Test that invalid auditor EIN is replaced.""" + info = {"auditor_ein": "invalid_ein"} + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) + mock_track.assert_called_once_with( + "AUDITOR_EIN", + "invalid_ein", + "auditor_ein", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration", + ) + self.assertEqual(result["auditor_ein"], settings.GSA_MIGRATION) + + def test_auditor_ein_empty_replaced(self): + """Test that empty auditor EIN is replaced.""" + info = {"auditor_ein": ""} + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditor_ein_with_gsa_migration(info) + mock_track.assert_called_once() + self.assertEqual(result["auditor_ein"], settings.GSA_MIGRATION) + + def test_auditee_ein_valid(self): + """Test that valid auditee EIN is not replaced.""" + info = {"ein": "123456789"} + result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) + self.assertEqual(result["ein"], "123456789") + + def test_auditee_ein_invalid_replaced(self): + """Test that invalid auditee EIN is replaced.""" + info = {"ein": "invalid_ein"} + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) + mock_track.assert_called_once_with( + "EIN", + "invalid_ein", + "auditee_ein", + settings.GSA_MIGRATION, + "xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration", + ) + self.assertEqual(result["ein"], settings.GSA_MIGRATION) + + def test_auditee_ein_empty_replaced(self): + """Test that empty auditee EIN is replaced.""" + info = {"ein": ""} + with patch( + "census_historical_migration.sac_general_lib.general_information.track_transformations" + ) as mock_track: + result = xform_replace_empty_or_invalid_auditee_ein_with_gsa_migration(info) + mock_track.assert_called_once() + self.assertEqual(result["ein"], settings.GSA_MIGRATION) + + +class TestXformReplaceEmptyAuditorZip(SimpleTestCase): + def test_empty_auditor_zip(self): + """Test that an empty US auditor_zip and auditee_zip are replaced with 'GSA_MIGRATION'""" + input_data = { + "auditee_zip": "", + "auditor_country": "USA", + "auditor_zip": "", + } + expected_output = { + "auditee_zip": settings.GSA_MIGRATION, + "auditor_country": "USA", + "auditor_zip": settings.GSA_MIGRATION, + } + self.assertEqual(xform_replace_empty_zips(input_data), expected_output) + + def test_empty_non_us_auditor_zip(self): + """Test that an empty non-US auditor_zip is not replaced with 'GSA_MIGRATION'""" + input_data = { + "auditee_zip": "", + "auditor_country": "non-USA", + "auditor_zip": "", + } + expected_output = { + "auditee_zip": settings.GSA_MIGRATION, + "auditor_country": "non-USA", + "auditor_zip": "", + } + 
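The two patched tests above describe `is_uei_valid` as loading `UeiSchema.json` from `settings.OUTPUT_BASE_DIR` and wrapping missing-file and bad-JSON failures in `DataMigrationError`; the schema literal in `setUp` shows the rules a UEI must satisfy. A sketch under those assumptions, validating with the `jsonschema` package (which may differ from the project's actual mechanism):

```python
import json
import os

from jsonschema import validate
from jsonschema.exceptions import ValidationError

class DataMigrationError(Exception):
    """Stand-in for census_historical_migration.exception_utils.DataMigrationError."""

def is_uei_valid_sketch(uei, schema_dir="some/dir"):
    path = os.path.join(schema_dir, "UeiSchema.json")
    try:
        with open(path) as f:
            uei_schema = json.load(f)
    except FileNotFoundError:
        raise DataMigrationError(f"UeiSchema.json file not found in {schema_dir}")
    except json.decoder.JSONDecodeError:
        raise DataMigrationError(f"UeiSchema.json file contains invalid JSON in {schema_dir}")
    try:
        # A valid UEI matches the 12-character pattern set; the literal
        # string "GSA_MIGRATION" is also accepted via the oneOf branch.
        validate(instance=uei, schema=uei_schema)
        return True
    except ValidationError:
        return False
```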
self.assertEqual(xform_replace_empty_zips(input_data), expected_output) + + def test_non_empty_auditor_zip(self): + """Test that a non-empty US auditor_zip and auditee_zip remain unchanged""" + input_data = { + "auditee_zip": "10108", + "auditor_country": "USA", + "auditor_zip": "12345", + } + self.assertEqual(xform_replace_empty_zips(input_data), input_data) + + +class TestXformAuditPeriodOtherMonths(SimpleTestCase): + class MockAuditHeader: + def __init__(self, PERIODCOVERED, NUMBERMONTHS): + self.PERIODCOVERED = PERIODCOVERED + self.NUMBERMONTHS = NUMBERMONTHS + + def setUp(self): + self.audit_header = self.MockAuditHeader("", "") + + def test_periodcovered_other_zfill(self): + """Test that audit_period_other_months is set to NUMBERMONTHS with padded zeroes""" + self.audit_header.PERIODCOVERED = "O" + self.audit_header.NUMBERMONTHS = "6" + general_information = { + "audit_period_other_months": "", + } + expected_output = { + "audit_period_other_months": "06", + } + xform_audit_period_other_months(general_information, self.audit_header) + self.assertEqual( + general_information, + expected_output, + ) + + def test_periodcovered_other_no_zfill(self): + """Test that audit_period_other_months is set to NUMBERMONTHS without padded zeroes""" + self.audit_header.PERIODCOVERED = "O" + self.audit_header.NUMBERMONTHS = "14" + general_information = { + "audit_period_other_months": "", + } + expected_output = { + "audit_period_other_months": "14", + } + xform_audit_period_other_months(general_information, self.audit_header) + self.assertEqual( + general_information, + expected_output, + ) + + def test_periodcovered_not_other(self): + """Test that audit_period_other_months is not set""" + self.audit_header.PERIODCOVERED = "A" + self.audit_header.NUMBERMONTHS = "10" + general_information = { + "audit_period_other_months": "", + } + expected_output = { + "audit_period_other_months": "", + } + xform_audit_period_other_months(general_information, self.audit_header) + self.assertEqual( + general_information, + expected_output, + ) diff --git a/backend/census_historical_migration/test_notes_to_sefa_xforms.py b/backend/census_historical_migration/test_notes_to_sefa_xforms.py index 3d6261202a..d1b8032936 100644 --- a/backend/census_historical_migration/test_notes_to_sefa_xforms.py +++ b/backend/census_historical_migration/test_notes_to_sefa_xforms.py @@ -1,518 +1,518 @@ -from django.conf import settings -from django.test import SimpleTestCase - -from .exception_utils import DataMigrationError -from .workbooklib.notes_to_sefa import ( - xform_is_minimis_rate_used, - xform_missing_note_title_and_content, - xform_missing_notes_records_v2, - xform_rate_content, - xform_policies_content, - xform_sanitize_policies_content, -) - - -class TestXformIsMinimisRateUsed(SimpleTestCase): - def test_rate_used(self): - """Test that the function returns 'Y' when the rate is used.""" - self.assertEqual( - xform_is_minimis_rate_used("The auditee used the de minimis cost rate."), - "Y", - ) - - self.assertEqual( - xform_is_minimis_rate_used( - "The School has elected to use the 10-percent de minimis indirect cost rate as allowed under the Uniform Guidance." - ), - "Y", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "They have used the de minimis rate for this project." - ), - "Y", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The auditee organization elected to use the de minimis rate." 
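`TestXformAuditPeriodOtherMonths` above fixes the behavior of the month-count xform: only when the Census `PERIODCOVERED` is `"O"` ("other") is `NUMBERMONTHS` copied over, zero-padded to two digits. A sketch consistent with those three tests:

```python
def xform_audit_period_other_months_sketch(general_information, audit_header):
    # Only "other" periods carry a month count; annual ("A") and
    # biennial periods leave the field as-is.
    if audit_header.PERIODCOVERED == "O":
        general_information["audit_period_other_months"] = str(
            audit_header.NUMBERMONTHS
        ).zfill(2)

# "6" -> "06", "14" -> "14"; PERIODCOVERED == "A" leaves "" untouched.
```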
- ), - "Y", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The de minimis rate is used and is allowed under our policy." - ), - "Y", - ) - self.assertEqual( - xform_is_minimis_rate_used("The Organization utilizes the 10% de minimis"), - "Y", - ) - - def test_rate_not_used(self): - """Test that the function returns 'N' when the rate is not used.""" - self.assertEqual( - xform_is_minimis_rate_used( - "The auditee did not use the de minimis cost rate." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The Board has elected not to use the 10 percent de minimus indirect cost as allowed under the Uniform Guidance." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The organization did not use the de minimis rate." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "It was decided not to use the de minimis rate in this case." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The institution has elected not to use the de minimis rate." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "There are no additional indirect costs allocated to the Corporation." - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The Project has decided not to utilize the ten percent de minimis" - ), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used("The Symphony did not utilize a 10%"), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used("10% de minimis rate option was not utilized"), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used("Did not make this election"), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used("Has not made an election"), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used("No election has been made"), - "N", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "IntraHealth negotiates and utilizes an indirect cost rate with the federal government and therefore does not utilize the 10% de minimis cost rate option under Uniform Guidance." - ), - "N", - ) - - def test_rate_with_multiple_spaces(self): - """Test that the function returns the correct results when the rate is used and there are multiple spaces between words.""" - self.assertEqual( - xform_is_minimis_rate_used( - "We have elected to use the de minimis rate." - ), - "Y", - ) - self.assertEqual( - xform_is_minimis_rate_used( - "The organization did not use the de minimis rate." - ), - "N", - ) - - def test_ambiguous_or_unclear_raises_exception(self): - """Test that the function raises an exception when rate usage is ambiguous or unclear.""" - with self.assertRaises(DataMigrationError): - xform_is_minimis_rate_used( - "The information regarding the de minimis rate is not clear." - ) - - with self.assertRaises(DataMigrationError): - xform_is_minimis_rate_used( - "It is unknown whether the de minimis rate was applied." 
- ) - - def test_empty_string(self): - """Test that the function returns GSA MIGRATION keyword when the input is an empty string.""" - self.assertEqual(xform_is_minimis_rate_used(""), settings.GSA_MIGRATION) - - -class TestXformMissingNoteTitleAndContent(SimpleTestCase): - class MockNote: - def __init__( - self, - TITLE, - CONTENT, - TYPE_ID, - ): - self.TITLE = TITLE - self.CONTENT = CONTENT - self.TYPE_ID = TYPE_ID - - def _mock_notes_no_title(self): - notes = [] - notes.append( - self.MockNote( - TITLE="", - CONTENT="SUPPORTIVE HOUSING FOR THE ELDERLY (14.157) - Balances outstanding at the end of the audit period were 3356.", - TYPE_ID=3, - ) - ) - notes.append( - self.MockNote( - TITLE="", - CONTENT="MORTGAGE INSURANCE FOR THE PURCHASE OR REFINANCING OF EXISTING MULTIFAMILY HOUSING PROJECTS (14.155) - Balances outstanding at the end of the audit period were 4040.", - TYPE_ID=3, - ) - ) - return notes - - def _mock_notes_no_content(self): - notes = [] - notes.append( - self.MockNote( - TITLE="Loan/loan guarantee outstanding balances", - CONTENT="", - TYPE_ID=3, - ) - ) - notes.append( - self.MockNote( - TITLE="Federally Funded Insured Mortgages and Capital Advances", - CONTENT="", - TYPE_ID=3, - ) - ) - return notes - - def _mock_notes_with_title_content(self): - notes = [] - notes.append( - self.MockNote( - TITLE="Loan/loan guarantee outstanding balances", - CONTENT="SUPPORTIVE HOUSING FOR THE ELDERLY (14.157) - Balances outstanding at the end of the audit period were 4000.", - TYPE_ID=3, - ) - ) - notes.append( - self.MockNote( - TITLE="Federally Funded Insured Mortgages and Capital Advances", - CONTENT="MORTGAGE INSURANCE FOR THE PURCHASE OR REFINANCING OF EXISTING MULTIFAMILY HOUSING PROJECTS (14.155) - Balances outstanding at the end of the audit period were 5000.", - TYPE_ID=3, - ) - ) - return notes - - def test_note_w_no_title(self): - notes = self._mock_notes_no_title() - result = xform_missing_note_title_and_content(notes) - for note in result: - self.assertIn(settings.GSA_MIGRATION, note.TITLE) - self.assertNotIn(settings.GSA_MIGRATION, note.CONTENT) - - def test_note_w_no_content(self): - notes = self._mock_notes_no_content() - result = xform_missing_note_title_and_content(notes) - for note in result: - self.assertNotIn(settings.GSA_MIGRATION, note.TITLE) - self.assertIn(settings.GSA_MIGRATION, note.CONTENT) - - def test_note_with_title_content(self): - notes = self._mock_notes_with_title_content() - result = xform_missing_note_title_and_content(notes) - for note in result: - self.assertNotIn(settings.GSA_MIGRATION, note.TITLE) - self.assertNotIn(settings.GSA_MIGRATION, note.CONTENT) - - -class TestXformMissingNotesRecordsV2(SimpleTestCase): - class MockAuditHeader: - def __init__( - self, - DBKEY, - AUDITYEAR, - ): - self.DBKEY = DBKEY - self.AUDITYEAR = AUDITYEAR - - class MockNote: - def __init__( - self, - TITLE, - CONTENT, - TYPE_ID, - DBKEY, - AUDITYEAR, - ): - self.TITLE = TITLE - self.CONTENT = CONTENT - self.TYPE_ID = TYPE_ID - self.DBKEY = DBKEY - self.AUDITYEAR = AUDITYEAR - - def _mock_notes_valid_year_policies_no_rate(self): - notes = [] - notes.append( - self.MockNote( - TITLE="Significant Accounting Policies Used in Preparing the SEFA", - CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. 
Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", - TYPE_ID=1, - DBKEY="123456789", - AUDITYEAR="2022", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="123456789", - AUDITYEAR="2022", - ) - return notes, audit_header - - def _mock_notes_valid_year_rate_no_policies(self): - notes = [] - notes.append( - self.MockNote( - TITLE="10% De Minimis Cost Rate", - CONTENT="The auditee did not elect to use the de minimis cost rate.", - TYPE_ID=2, - DBKEY="223456789", - AUDITYEAR="2021", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="223456789", - AUDITYEAR="2021", - ) - return notes, audit_header - - def _mock_notes_valid_year_no_rate_no_policies(self): - notes = [] - audit_header = self.MockAuditHeader( - DBKEY="223456788", - AUDITYEAR="2020", - ) - return notes, audit_header - - def _mock_notes_valid_year_rate_policies(self): - notes = [] - notes.append( - self.MockNote( - TITLE="10% De Minimis Cost Rate", - CONTENT="The auditee did not elect to use the de minimis cost rate.", - TYPE_ID=2, - DBKEY="123456788", - AUDITYEAR="2018", - ) - ) - notes.append( - self.MockNote( - TITLE="Significant Accounting Policies Used in Preparing the SEFA", - CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", - TYPE_ID=1, - DBKEY="123456788", - AUDITYEAR="2018", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="123456788", - AUDITYEAR="2018", - ) - return notes, audit_header - - def _mock_notes_invalid_year_policies_no_rate(self): - notes = [] - notes.append( - self.MockNote( - TITLE="Significant Accounting Policies Used in Preparing the SEFA", - CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", - TYPE_ID=1, - DBKEY="124456788", - AUDITYEAR="2015", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="124456788", - AUDITYEAR="2015", - ) - return notes, audit_header - - def _mock_notes_invalid_year_rate_no_policies(self): - notes = [] - notes.append( - self.MockNote( - TITLE="10% De Minimis Cost Rate", - CONTENT="The auditee did not elect to use the de minimis cost rate.", - TYPE_ID=2, - DBKEY="124456789", - AUDITYEAR="2014", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="124456789", - AUDITYEAR="2014", - ) - return notes, audit_header - - def _mock_notes_invalid_year_no_rate_no_policies(self): - notes = [] - audit_header = self.MockAuditHeader( - DBKEY="134456788", - AUDITYEAR="2013", - ) - return notes, audit_header - - def _mock_notes_invalid_year_rate_policies(self): - notes = [] - notes.append( - self.MockNote( - TITLE="10% De Minimis Cost Rate", - CONTENT="The auditee did not elect to use the de minimis cost rate.", - TYPE_ID=2, - DBKEY="124466788", - AUDITYEAR="1800", - ) - ) - notes.append( - self.MockNote( - TITLE="Significant Accounting Policies Used in Preparing the SEFA", - CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. 
Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", - TYPE_ID=1, - DBKEY="124466788", - AUDITYEAR="1800", - ) - ) - audit_header = self.MockAuditHeader( - DBKEY="124466788", - AUDITYEAR="1800", - ) - return notes, audit_header - - def test_xform_missing_notes_records_v2_with_valid_year_policies_no_rate(self): - notes, audit_header = self._mock_notes_valid_year_policies_no_rate() - policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ - 0 - ].CONTENT - rate_content = "" - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - def test_xform_missing_notes_records_v2_with_valid_year_rate_no_policies(self): - notes, audit_header = self._mock_notes_valid_year_rate_no_policies() - policies_content = "" - rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(rate_content, result_rate_content) - self.assertEqual(policies_content, result_policies_content) - - def test_xform_missing_notes_records_v2_with_valid_year_no_rate_no_policies(self): - notes, audit_header = self._mock_notes_valid_year_no_rate_no_policies() - policies_content = "" - rate_content = "" - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(settings.GSA_MIGRATION, result_policies_content) - self.assertEqual(settings.GSA_MIGRATION, result_rate_content) - - def test_xform_missing_notes_records_v2_with_valid_year_rate_policies(self): - notes, audit_header = self._mock_notes_valid_year_rate_policies() - policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ - 0 - ].CONTENT - rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - def test_xform_missing_notes_records_v2_with_invalid_year_policies_no_rate(self): - notes, audit_header = self._mock_notes_invalid_year_policies_no_rate() - policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ - 0 - ].CONTENT - rate_content = "" - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - def test_xform_missing_notes_records_v2_with_invalid_year_rate_no_policies(self): - notes, audit_header = self._mock_notes_invalid_year_rate_no_policies() - policies_content = "" - rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - def test_xform_missing_notes_records_v2_with_invalid_year_no_rate_no_policies(self): - notes, 
audit_header = self._mock_notes_invalid_year_no_rate_no_policies() - policies_content = "" - rate_content = "" - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - def test_xform_missing_notes_records_v2_with_invalid_year_rate_policies(self): - notes, audit_header = self._mock_notes_invalid_year_rate_policies() - policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ - 0 - ].CONTENT - rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT - result_policies_content, result_rate_content = xform_missing_notes_records_v2( - audit_header, policies_content, rate_content - ) - self.assertEqual(policies_content, result_policies_content) - self.assertEqual(rate_content, result_rate_content) - - -class TestXformRateContent(SimpleTestCase): - def test_blank_rate_content(self): - self.assertEqual(xform_rate_content(""), settings.GSA_MIGRATION) - - def test_non_blank_rate_content(self): - self.assertEqual(xform_rate_content("test_rate"), "test_rate") - - -class TestXformPoliciesContent(SimpleTestCase): - def test_blank_policies_content(self): - self.assertEqual(xform_policies_content(""), settings.GSA_MIGRATION) - - def test_non_blank_policies_content(self): - self.assertEqual(xform_policies_content("test_policies"), "test_policies") - - -class TestXformSanitizePoliciesContent(SimpleTestCase): - def test_special_char_policies_content(self): - self.assertEqual( - xform_sanitize_policies_content("====test_policies"), "test_policies" - ) - - def test_no_special_char_policies_content(self): - self.assertEqual( - xform_sanitize_policies_content("test_policies"), "test_policies" - ) +from django.conf import settings +from django.test import SimpleTestCase + +from .exception_utils import DataMigrationError +from .workbooklib.notes_to_sefa import ( + xform_is_minimis_rate_used, + xform_missing_note_title_and_content, + xform_missing_notes_records_v2, + xform_rate_content, + xform_policies_content, + xform_sanitize_policies_content, +) + + +class TestXformIsMinimisRateUsed(SimpleTestCase): + def test_rate_used(self): + """Test that the function returns 'Y' when the rate is used.""" + self.assertEqual( + xform_is_minimis_rate_used("The auditee used the de minimis cost rate."), + "Y", + ) + + self.assertEqual( + xform_is_minimis_rate_used( + "The School has elected to use the 10-percent de minimis indirect cost rate as allowed under the Uniform Guidance." + ), + "Y", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "They have used the de minimis rate for this project." + ), + "Y", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The auditee organization elected to use the de minimis rate." + ), + "Y", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The de minimis rate is used and is allowed under our policy." + ), + "Y", + ) + self.assertEqual( + xform_is_minimis_rate_used("The Organization utilizes the 10% de minimis"), + "Y", + ) + + def test_rate_not_used(self): + """Test that the function returns 'N' when the rate is not used.""" + self.assertEqual( + xform_is_minimis_rate_used( + "The auditee did not use the de minimis cost rate." + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The Board has elected not to use the 10 percent de minimus indirect cost as allowed under the Uniform Guidance." 
+ ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The organization did not use the de minimis rate." + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "It was decided not to use the de minimis rate in this case." + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The institution has elected not to use the de minimis rate." + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "There are no additional indirect costs allocated to the Corporation." + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The Project has decided not to utilize the ten percent de minimis" + ), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used("The Symphony did not utilize a 10%"), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used("10% de minimis rate option was not utilized"), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used("Did not make this election"), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used("Has not made an election"), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used("No election has been made"), + "N", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "IntraHealth negotiates and utilizes an indirect cost rate with the federal government and therefore does not utilize the 10% de minimis cost rate option under Uniform Guidance." + ), + "N", + ) + + def test_rate_with_multiple_spaces(self): + """Test that the function returns the correct results when the rate is used and there are multiple spaces between words.""" + self.assertEqual( + xform_is_minimis_rate_used( + "We have elected to use the de minimis rate." + ), + "Y", + ) + self.assertEqual( + xform_is_minimis_rate_used( + "The organization did not use the de minimis rate." + ), + "N", + ) + + def test_ambiguous_or_unclear_raises_exception(self): + """Test that the function raises an exception when rate usage is ambiguous or unclear.""" + with self.assertRaises(DataMigrationError): + xform_is_minimis_rate_used( + "The information regarding the de minimis rate is not clear." + ) + + with self.assertRaises(DataMigrationError): + xform_is_minimis_rate_used( + "It is unknown whether the de minimis rate was applied." 
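Taken together, these cases characterize `xform_is_minimis_rate_used` as a keyword classifier over free-form note text: blank input maps to the `settings.GSA_MIGRATION` sentinel, clear negations map to `"N"`, clear affirmations map to `"Y"`, and anything ambiguous raises `DataMigrationError`. A rough sketch of that shape; the phrase lists below are illustrative stand-ins for the module's actual patterns and merely cover the cases in this test class:

```python
from django.conf import settings

class DataMigrationError(Exception):
    """Stand-in for the migration's real exception class."""

NEGATIVE_PHRASES = (
    "did not", "does not", "not to use", "not to utilize", "not utilized",
    "no election", "not made an election", "no additional indirect",
)
POSITIVE_PHRASES = (
    "elected to use", "used the de minimis", "utilizes the", "is used",
)

def is_minimis_rate_used_sketch(rate_content):
    if not rate_content:
        return settings.GSA_MIGRATION
    text = " ".join(rate_content.split()).lower()  # collapse repeated spaces
    # Check negations first so "did not use the de minimis rate" wins
    # over the embedded affirmative phrase.
    if any(phrase in text for phrase in NEGATIVE_PHRASES):
        return "N"
    if any(phrase in text for phrase in POSITIVE_PHRASES):
        return "Y"
    raise DataMigrationError("Unable to determine de minimis rate usage")
```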
+ ) + + def test_empty_string(self): + """Test that the function returns GSA MIGRATION keyword when the input is an empty string.""" + self.assertEqual(xform_is_minimis_rate_used(""), settings.GSA_MIGRATION) + + +class TestXformMissingNoteTitleAndContent(SimpleTestCase): + class MockNote: + def __init__( + self, + TITLE, + CONTENT, + TYPE_ID, + ): + self.TITLE = TITLE + self.CONTENT = CONTENT + self.TYPE_ID = TYPE_ID + + def _mock_notes_no_title(self): + notes = [] + notes.append( + self.MockNote( + TITLE="", + CONTENT="SUPPORTIVE HOUSING FOR THE ELDERLY (14.157) - Balances outstanding at the end of the audit period were 3356.", + TYPE_ID=3, + ) + ) + notes.append( + self.MockNote( + TITLE="", + CONTENT="MORTGAGE INSURANCE FOR THE PURCHASE OR REFINANCING OF EXISTING MULTIFAMILY HOUSING PROJECTS (14.155) - Balances outstanding at the end of the audit period were 4040.", + TYPE_ID=3, + ) + ) + return notes + + def _mock_notes_no_content(self): + notes = [] + notes.append( + self.MockNote( + TITLE="Loan/loan guarantee outstanding balances", + CONTENT="", + TYPE_ID=3, + ) + ) + notes.append( + self.MockNote( + TITLE="Federally Funded Insured Mortgages and Capital Advances", + CONTENT="", + TYPE_ID=3, + ) + ) + return notes + + def _mock_notes_with_title_content(self): + notes = [] + notes.append( + self.MockNote( + TITLE="Loan/loan guarantee outstanding balances", + CONTENT="SUPPORTIVE HOUSING FOR THE ELDERLY (14.157) - Balances outstanding at the end of the audit period were 4000.", + TYPE_ID=3, + ) + ) + notes.append( + self.MockNote( + TITLE="Federally Funded Insured Mortgages and Capital Advances", + CONTENT="MORTGAGE INSURANCE FOR THE PURCHASE OR REFINANCING OF EXISTING MULTIFAMILY HOUSING PROJECTS (14.155) - Balances outstanding at the end of the audit period were 5000.", + TYPE_ID=3, + ) + ) + return notes + + def test_note_w_no_title(self): + notes = self._mock_notes_no_title() + result = xform_missing_note_title_and_content(notes) + for note in result: + self.assertIn(settings.GSA_MIGRATION, note.TITLE) + self.assertNotIn(settings.GSA_MIGRATION, note.CONTENT) + + def test_note_w_no_content(self): + notes = self._mock_notes_no_content() + result = xform_missing_note_title_and_content(notes) + for note in result: + self.assertNotIn(settings.GSA_MIGRATION, note.TITLE) + self.assertIn(settings.GSA_MIGRATION, note.CONTENT) + + def test_note_with_title_content(self): + notes = self._mock_notes_with_title_content() + result = xform_missing_note_title_and_content(notes) + for note in result: + self.assertNotIn(settings.GSA_MIGRATION, note.TITLE) + self.assertNotIn(settings.GSA_MIGRATION, note.CONTENT) + + +class TestXformMissingNotesRecordsV2(SimpleTestCase): + class MockAuditHeader: + def __init__( + self, + DBKEY, + AUDITYEAR, + ): + self.DBKEY = DBKEY + self.AUDITYEAR = AUDITYEAR + + class MockNote: + def __init__( + self, + TITLE, + CONTENT, + TYPE_ID, + DBKEY, + AUDITYEAR, + ): + self.TITLE = TITLE + self.CONTENT = CONTENT + self.TYPE_ID = TYPE_ID + self.DBKEY = DBKEY + self.AUDITYEAR = AUDITYEAR + + def _mock_notes_valid_year_policies_no_rate(self): + notes = [] + notes.append( + self.MockNote( + TITLE="Significant Accounting Policies Used in Preparing the SEFA", + CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. 
Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", + TYPE_ID=1, + DBKEY="123456789", + AUDITYEAR="2022", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="123456789", + AUDITYEAR="2022", + ) + return notes, audit_header + + def _mock_notes_valid_year_rate_no_policies(self): + notes = [] + notes.append( + self.MockNote( + TITLE="10% De Minimis Cost Rate", + CONTENT="The auditee did not elect to use the de minimis cost rate.", + TYPE_ID=2, + DBKEY="223456789", + AUDITYEAR="2021", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="223456789", + AUDITYEAR="2021", + ) + return notes, audit_header + + def _mock_notes_valid_year_no_rate_no_policies(self): + notes = [] + audit_header = self.MockAuditHeader( + DBKEY="223456788", + AUDITYEAR="2020", + ) + return notes, audit_header + + def _mock_notes_valid_year_rate_policies(self): + notes = [] + notes.append( + self.MockNote( + TITLE="10% De Minimis Cost Rate", + CONTENT="The auditee did not elect to use the de minimis cost rate.", + TYPE_ID=2, + DBKEY="123456788", + AUDITYEAR="2018", + ) + ) + notes.append( + self.MockNote( + TITLE="Significant Accounting Policies Used in Preparing the SEFA", + CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", + TYPE_ID=1, + DBKEY="123456788", + AUDITYEAR="2018", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="123456788", + AUDITYEAR="2018", + ) + return notes, audit_header + + def _mock_notes_invalid_year_policies_no_rate(self): + notes = [] + notes.append( + self.MockNote( + TITLE="Significant Accounting Policies Used in Preparing the SEFA", + CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", + TYPE_ID=1, + DBKEY="124456788", + AUDITYEAR="2015", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="124456788", + AUDITYEAR="2015", + ) + return notes, audit_header + + def _mock_notes_invalid_year_rate_no_policies(self): + notes = [] + notes.append( + self.MockNote( + TITLE="10% De Minimis Cost Rate", + CONTENT="The auditee did not elect to use the de minimis cost rate.", + TYPE_ID=2, + DBKEY="124456789", + AUDITYEAR="2014", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="124456789", + AUDITYEAR="2014", + ) + return notes, audit_header + + def _mock_notes_invalid_year_no_rate_no_policies(self): + notes = [] + audit_header = self.MockAuditHeader( + DBKEY="134456788", + AUDITYEAR="2013", + ) + return notes, audit_header + + def _mock_notes_invalid_year_rate_policies(self): + notes = [] + notes.append( + self.MockNote( + TITLE="10% De Minimis Cost Rate", + CONTENT="The auditee did not elect to use the de minimis cost rate.", + TYPE_ID=2, + DBKEY="124466788", + AUDITYEAR="1800", + ) + ) + notes.append( + self.MockNote( + TITLE="Significant Accounting Policies Used in Preparing the SEFA", + CONTENT="Expenditures reported on the Schedule are reported on the accrual basis of accounting. 
Such expenditures are recognized following the cost principles contained in the Uniform Guidance, wherein certain types of expenditures are not allowable or are limited as to reimbursement.", + TYPE_ID=1, + DBKEY="124466788", + AUDITYEAR="1800", + ) + ) + audit_header = self.MockAuditHeader( + DBKEY="124466788", + AUDITYEAR="1800", + ) + return notes, audit_header + + def test_xform_missing_notes_records_v2_with_valid_year_policies_no_rate(self): + notes, audit_header = self._mock_notes_valid_year_policies_no_rate() + policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ + 0 + ].CONTENT + rate_content = "" + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + def test_xform_missing_notes_records_v2_with_valid_year_rate_no_policies(self): + notes, audit_header = self._mock_notes_valid_year_rate_no_policies() + policies_content = "" + rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(rate_content, result_rate_content) + self.assertEqual(policies_content, result_policies_content) + + def test_xform_missing_notes_records_v2_with_valid_year_no_rate_no_policies(self): + notes, audit_header = self._mock_notes_valid_year_no_rate_no_policies() + policies_content = "" + rate_content = "" + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(settings.GSA_MIGRATION, result_policies_content) + self.assertEqual(settings.GSA_MIGRATION, result_rate_content) + + def test_xform_missing_notes_records_v2_with_valid_year_rate_policies(self): + notes, audit_header = self._mock_notes_valid_year_rate_policies() + policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ + 0 + ].CONTENT + rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + def test_xform_missing_notes_records_v2_with_invalid_year_policies_no_rate(self): + notes, audit_header = self._mock_notes_invalid_year_policies_no_rate() + policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ + 0 + ].CONTENT + rate_content = "" + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + def test_xform_missing_notes_records_v2_with_invalid_year_rate_no_policies(self): + notes, audit_header = self._mock_notes_invalid_year_rate_no_policies() + policies_content = "" + rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + def test_xform_missing_notes_records_v2_with_invalid_year_no_rate_no_policies(self): + notes, 
audit_header = self._mock_notes_invalid_year_no_rate_no_policies() + policies_content = "" + rate_content = "" + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + def test_xform_missing_notes_records_v2_with_invalid_year_rate_policies(self): + notes, audit_header = self._mock_notes_invalid_year_rate_policies() + policies_content = list(filter(lambda note: note.TYPE_ID == 1, notes))[ + 0 + ].CONTENT + rate_content = list(filter(lambda note: note.TYPE_ID == 2, notes))[0].CONTENT + result_policies_content, result_rate_content = xform_missing_notes_records_v2( + audit_header, policies_content, rate_content + ) + self.assertEqual(policies_content, result_policies_content) + self.assertEqual(rate_content, result_rate_content) + + +class TestXformRateContent(SimpleTestCase): + def test_blank_rate_content(self): + self.assertEqual(xform_rate_content(""), settings.GSA_MIGRATION) + + def test_non_blank_rate_content(self): + self.assertEqual(xform_rate_content("test_rate"), "test_rate") + + +class TestXformPoliciesContent(SimpleTestCase): + def test_blank_policies_content(self): + self.assertEqual(xform_policies_content(""), settings.GSA_MIGRATION) + + def test_non_blank_policies_content(self): + self.assertEqual(xform_policies_content("test_policies"), "test_policies") + + +class TestXformSanitizePoliciesContent(SimpleTestCase): + def test_special_char_policies_content(self): + self.assertEqual( + xform_sanitize_policies_content("====test_policies"), "test_policies" + ) + + def test_no_special_char_policies_content(self): + self.assertEqual( + xform_sanitize_policies_content("test_policies"), "test_policies" + ) diff --git a/backend/census_historical_migration/test_sac_creation.py b/backend/census_historical_migration/test_sac_creation.py index 423c1c3e90..bf60ff967d 100644 --- a/backend/census_historical_migration/test_sac_creation.py +++ b/backend/census_historical_migration/test_sac_creation.py @@ -1,117 +1,117 @@ -from .sac_general_lib.sac_creator import setup_sac -from audit.models import User - -from model_bakery import baker -from unittest import TestCase -from django.conf import settings - - -class TestSacTribalConsent(TestCase): - class MockAuditHeader: - def __init__( - self, - entity_type, - suppression_code, - ): - self.AUDITEECONTACT = "Scrooge Jones" - self.AUDITEEEMAIL = "auditee.mcaudited@leftfield.com" - self.AUDITEENAME = "Designate Representative" - self.AUDITEEPHONE = "5558675309" - self.AUDITEETITLE = "Lord of Doors" - self.AUDITOR_EIN = "987654321" - self.AUDITTYPE = "S" - self.AUDITYEAR = "2022" - self.CITY = "New York" - self.CPACITY = "Podunk" - self.CPACONTACT = "Qualified Human Accountant" - self.CPACOUNTRY = "US" - self.CPAEMAIL = "qualified.human.accountant@dollarauditstore.com" - self.CPAFIRMNAME = "Dollar Audit Store" - self.CPAPHONE = "0008675309" - self.CPASTATE = "NY" - self.CPASTREET1 = "100 Percent Respectable St." 
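A quick note on the sanitizer covered at the end of the notes-to-SEFA hunk: `xform_sanitize_policies_content` strips leading decoration such as `====` while leaving clean text alone. A minimal sketch, assuming only leading non-alphanumeric noise needs removing:

```python
import re

def sanitize_policies_content_sketch(content):
    # Drop leading separator characters (e.g. "====") but keep the text.
    return re.sub(r"^[^A-Za-z0-9]+", "", content)

assert sanitize_policies_content_sketch("====test_policies") == "test_policies"
assert sanitize_policies_content_sketch("test_policies") == "test_policies"
```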
- self.CPATITLE = "Just an ordinary person" - self.CPAZIPCODE = "14886" - self.DBKEY = "123456789" - self.DOLLARTHRESHOLD = "750000" - self.EIN = "134278617" - self.ENTITY_TYPE = entity_type - self.FYENDDATE = "01/01/2022 00:00:00" - self.GOINGCONCERN = "N" - self.LOWRISK = "N" - self.MATERIALNONCOMPLIANCE = "N" - self.MATERIALWEAKNESS = "N" - self.MATERIALWEAKNESS_MP = "N" - self.MULTIPLE_CPAS = "N" - self.MULTIPLEEINS = "N" - self.MULTIPLEUEIS = "N" - self.PERIODCOVERED = "A" - self.REPORTABLECONDITION = "N" - self.SP_FRAMEWORK = "cash" - self.SP_FRAMEWORK_REQUIRED = "Y" - self.STATE = "NY" - self.STREET1 = "200 feet into left field" - self.SUPPRESSION_CODE = suppression_code - self.TYPEREPORT_FS = "UQADS" - self.TYPEREPORT_SP_FRAMEWORK = "UQAD" - self.UEI = "ZQGGHJH74DW7" - self.ZIPCODE = "10451" - self.COGAGENCY = "14" - self.OVERSIGHTAGENCY = "84" - - def _mock_audit_header(self, entity_type, suppression_code=None): - """Returns a mock audit header with all necessary fields""" - return self.MockAuditHeader(entity_type, suppression_code) - - def _test_consent(self, consent, is_public): - """Tests the values found within sac.tribal_data_consent""" - self.assertEqual( - consent["tribal_authorization_certifying_official_title"], - settings.GSA_MIGRATION, - ) - self.assertEqual( - consent["is_tribal_information_authorized_to_be_public"], - is_public, - ) - self.assertEqual( - consent["tribal_authorization_certifying_official_name"], - settings.GSA_MIGRATION, - ) - - def test_non_tribal(self): - """Only 'tribal' entity types have tribal_data_consent populated""" - audit_header = self._mock_audit_header("non-profit") - user = baker.make(User) - sac = setup_sac(user, audit_header) - - self.assertIsNone(sac.tribal_data_consent) - - def test_tribal_public(self): - """Misc suppression codes makes them public""" - audit_header = self._mock_audit_header("tribal", "foo") - user = baker.make(User) - sac = setup_sac(user, audit_header) - consent = sac.tribal_data_consent - - self.assertIsNotNone(sac.tribal_data_consent) - self._test_consent(consent, True) - - def test_tribal_public_no_code(self): - """A missing suppression code makes them public""" - audit_header = self._mock_audit_header("tribal", "") - user = baker.make(User) - sac = setup_sac(user, audit_header) - consent = sac.tribal_data_consent - - self.assertIsNotNone(sac.tribal_data_consent) - self._test_consent(consent, True) - - def test_tribal_private(self): - """A tribal audit with suppression code 'it' will be private""" - audit_header = self._mock_audit_header("tribal", "it") - user = baker.make(User) - sac = setup_sac(user, audit_header) - consent = sac.tribal_data_consent - - self.assertIsNotNone(sac.tribal_data_consent) - self._test_consent(consent, False) +from .sac_general_lib.sac_creator import setup_sac +from audit.models import User + +from model_bakery import baker +from unittest import TestCase +from django.conf import settings + + +class TestSacTribalConsent(TestCase): + class MockAuditHeader: + def __init__( + self, + entity_type, + suppression_code, + ): + self.AUDITEECONTACT = "Scrooge Jones" + self.AUDITEEEMAIL = "auditee.mcaudited@leftfield.com" + self.AUDITEENAME = "Designate Representative" + self.AUDITEEPHONE = "5558675309" + self.AUDITEETITLE = "Lord of Doors" + self.AUDITOR_EIN = "987654321" + self.AUDITTYPE = "S" + self.AUDITYEAR = "2022" + self.CITY = "New York" + self.CPACITY = "Podunk" + self.CPACONTACT = "Qualified Human Accountant" + self.CPACOUNTRY = "US" + self.CPAEMAIL = 
"qualified.human.accountant@dollarauditstore.com" + self.CPAFIRMNAME = "Dollar Audit Store" + self.CPAPHONE = "0008675309" + self.CPASTATE = "NY" + self.CPASTREET1 = "100 Percent Respectable St." + self.CPATITLE = "Just an ordinary person" + self.CPAZIPCODE = "14886" + self.DBKEY = "123456789" + self.DOLLARTHRESHOLD = "750000" + self.EIN = "134278617" + self.ENTITY_TYPE = entity_type + self.FYENDDATE = "01/01/2022 00:00:00" + self.GOINGCONCERN = "N" + self.LOWRISK = "N" + self.MATERIALNONCOMPLIANCE = "N" + self.MATERIALWEAKNESS = "N" + self.MATERIALWEAKNESS_MP = "N" + self.MULTIPLE_CPAS = "N" + self.MULTIPLEEINS = "N" + self.MULTIPLEUEIS = "N" + self.PERIODCOVERED = "A" + self.REPORTABLECONDITION = "N" + self.SP_FRAMEWORK = "cash" + self.SP_FRAMEWORK_REQUIRED = "Y" + self.STATE = "NY" + self.STREET1 = "200 feet into left field" + self.SUPPRESSION_CODE = suppression_code + self.TYPEREPORT_FS = "UQADS" + self.TYPEREPORT_SP_FRAMEWORK = "UQAD" + self.UEI = "ZQGGHJH74DW7" + self.ZIPCODE = "10451" + self.COGAGENCY = "14" + self.OVERSIGHTAGENCY = "84" + + def _mock_audit_header(self, entity_type, suppression_code=None): + """Returns a mock audit header with all necessary fields""" + return self.MockAuditHeader(entity_type, suppression_code) + + def _test_consent(self, consent, is_public): + """Tests the values found within sac.tribal_data_consent""" + self.assertEqual( + consent["tribal_authorization_certifying_official_title"], + settings.GSA_MIGRATION, + ) + self.assertEqual( + consent["is_tribal_information_authorized_to_be_public"], + is_public, + ) + self.assertEqual( + consent["tribal_authorization_certifying_official_name"], + settings.GSA_MIGRATION, + ) + + def test_non_tribal(self): + """Only 'tribal' entity types have tribal_data_consent populated""" + audit_header = self._mock_audit_header("non-profit") + user = baker.make(User) + sac = setup_sac(user, audit_header) + + self.assertIsNone(sac.tribal_data_consent) + + def test_tribal_public(self): + """Misc suppression codes makes them public""" + audit_header = self._mock_audit_header("tribal", "foo") + user = baker.make(User) + sac = setup_sac(user, audit_header) + consent = sac.tribal_data_consent + + self.assertIsNotNone(sac.tribal_data_consent) + self._test_consent(consent, True) + + def test_tribal_public_no_code(self): + """A missing suppression code makes them public""" + audit_header = self._mock_audit_header("tribal", "") + user = baker.make(User) + sac = setup_sac(user, audit_header) + consent = sac.tribal_data_consent + + self.assertIsNotNone(sac.tribal_data_consent) + self._test_consent(consent, True) + + def test_tribal_private(self): + """A tribal audit with suppression code 'it' will be private""" + audit_header = self._mock_audit_header("tribal", "it") + user = baker.make(User) + sac = setup_sac(user, audit_header) + consent = sac.tribal_data_consent + + self.assertIsNotNone(sac.tribal_data_consent) + self._test_consent(consent, False) diff --git a/backend/census_historical_migration/test_secondary_auditors_xforms.py b/backend/census_historical_migration/test_secondary_auditors_xforms.py index dbaeafa632..91e25825b8 100644 --- a/backend/census_historical_migration/test_secondary_auditors_xforms.py +++ b/backend/census_historical_migration/test_secondary_auditors_xforms.py @@ -1,144 +1,144 @@ -from django.conf import settings -from django.test import SimpleTestCase - -from census_historical_migration.change_record import InspectionRecord - -from .workbooklib.secondary_auditors import ( - xform_address_state, - 
xform_address_zipcode, - xform_cpafirmname, - xform_pad_contact_phone_with_nine, -) - - -class TestXformSecondaryAuditorsState(SimpleTestCase): - class SecondaryAuditor: - def __init__(self, state): - self.CPASTATE = state - - def test_normal_address_state(self): - # Setup specific test data - secondary_auditors = [ - self.SecondaryAuditor("CA"), - ] - - xform_address_state(secondary_auditors) - - self.assertEqual(secondary_auditors[0].CPASTATE, "CA") - - def test_missing_address_state(self): - # Setup specific test data - secondary_auditors = [ - self.SecondaryAuditor(""), - ] - - xform_address_state(secondary_auditors) - - self.assertEqual(secondary_auditors[0].CPASTATE, settings.GSA_MIGRATION) - - -class TestXformSecondaryAuditorsZipcode(SimpleTestCase): - class SecondaryAuditor: - def __init__(self, zipcode): - self.CPAZIPCODE = zipcode - - def test_normal_address_zipcode(self): - # Setup specific test data - secondary_auditors = [ - self.SecondaryAuditor("10108"), - ] - - xform_address_zipcode(secondary_auditors) - - self.assertEqual(secondary_auditors[0].CPAZIPCODE, "10108") - - def test_missing_address_zipcode(self): - # Setup specific test data - secondary_auditors = [ - self.SecondaryAuditor(""), - ] - - xform_address_zipcode(secondary_auditors) - - self.assertEqual(secondary_auditors[0].CPAZIPCODE, settings.GSA_MIGRATION) - - -class TestXformCpaFirmName(SimpleTestCase): - class MockSecondaryAuditorHeader: - def __init__( - self, - DBKEY, - CPAFIRMNAME, - ): - self.DBKEY = DBKEY - self.CPAFIRMNAME = CPAFIRMNAME - - def _mock_secondaryauditor_header(self): - """Returns a mock secondary_auditor with all necessary fields.""" - return [ - self.MockSecondaryAuditorHeader( - DBKEY="123456789", - CPAFIRMNAME="John Doe CPA Firm", - ), - self.MockSecondaryAuditorHeader( - DBKEY="223456789", - CPAFIRMNAME="Jack C CPA Firm", - ), - ] - - def test_valid_cpafirm(self): - """Test that the function does not change the valid CPAFIRMNAME.""" - secondary_auditors = self._mock_secondaryauditor_header() - cpas = secondary_auditors - xform_cpafirmname(secondary_auditors) - for index in range(len(secondary_auditors)): - self.assertEqual( - secondary_auditors[index].CPAFIRMNAME, cpas[index].CPAFIRMNAME - ) - - def test_blank_cpafirm(self): - """Test that the function changes blank CPAFIRMNAME to GSA_MIGRATION.""" - secondary_auditors = self._mock_secondaryauditor_header() - secondary_auditors[0].CPAFIRMNAME = "" - cpas = secondary_auditors - xform_cpafirmname(secondary_auditors) - self.assertEqual(secondary_auditors[0].CPAFIRMNAME, settings.GSA_MIGRATION) - self.assertEqual(secondary_auditors[1].CPAFIRMNAME, cpas[1].CPAFIRMNAME) - - -class TestXformPadContactPhoneWithNine(SimpleTestCase): - - class MockSecondaryAuditorHeader: - - def __init__( - self, - CPAPHONE, - ): - self.CPAPHONE = CPAPHONE - - def setUp(self): - self.secondary_auditors = [ - self.MockSecondaryAuditorHeader(CPAPHONE="12345"), - self.MockSecondaryAuditorHeader(CPAPHONE="999999999"), - self.MockSecondaryAuditorHeader(CPAPHONE="1234567890"), - self.MockSecondaryAuditorHeader(CPAPHONE="98765432"), - ] - - def test_pad_contact_phone(self): - xform_pad_contact_phone_with_nine(self.secondary_auditors) - - self.assertEqual(self.secondary_auditors[0].CPAPHONE, "12345") # No change - self.assertEqual( - self.secondary_auditors[1].CPAPHONE, "9999999999" - ) # Pad applied - self.assertEqual(self.secondary_auditors[2].CPAPHONE, "1234567890") # No change - self.assertEqual(self.secondary_auditors[3].CPAPHONE, "98765432") # No change - - def 
test_change_records(self): - secondary_auditors = [self.MockSecondaryAuditorHeader(CPAPHONE="999999999")] - change_records_before = len(InspectionRecord.change["secondary_auditor"]) - - xform_pad_contact_phone_with_nine(secondary_auditors) - - change_records_after = len(InspectionRecord.change["secondary_auditor"]) - self.assertEqual(change_records_after, change_records_before + 1) +from django.conf import settings +from django.test import SimpleTestCase + +from census_historical_migration.change_record import InspectionRecord + +from .workbooklib.secondary_auditors import ( + xform_address_state, + xform_address_zipcode, + xform_cpafirmname, + xform_pad_contact_phone_with_nine, +) + + +class TestXformSecondaryAuditorsState(SimpleTestCase): + class SecondaryAuditor: + def __init__(self, state): + self.CPASTATE = state + + def test_normal_address_state(self): + # Setup specific test data + secondary_auditors = [ + self.SecondaryAuditor("CA"), + ] + + xform_address_state(secondary_auditors) + + self.assertEqual(secondary_auditors[0].CPASTATE, "CA") + + def test_missing_address_state(self): + # Setup specific test data + secondary_auditors = [ + self.SecondaryAuditor(""), + ] + + xform_address_state(secondary_auditors) + + self.assertEqual(secondary_auditors[0].CPASTATE, settings.GSA_MIGRATION) + + +class TestXformSecondaryAuditorsZipcode(SimpleTestCase): + class SecondaryAuditor: + def __init__(self, zipcode): + self.CPAZIPCODE = zipcode + + def test_normal_address_zipcode(self): + # Setup specific test data + secondary_auditors = [ + self.SecondaryAuditor("10108"), + ] + + xform_address_zipcode(secondary_auditors) + + self.assertEqual(secondary_auditors[0].CPAZIPCODE, "10108") + + def test_missing_address_zipcode(self): + # Setup specific test data + secondary_auditors = [ + self.SecondaryAuditor(""), + ] + + xform_address_zipcode(secondary_auditors) + + self.assertEqual(secondary_auditors[0].CPAZIPCODE, settings.GSA_MIGRATION) + + +class TestXformCpaFirmName(SimpleTestCase): + class MockSecondaryAuditorHeader: + def __init__( + self, + DBKEY, + CPAFIRMNAME, + ): + self.DBKEY = DBKEY + self.CPAFIRMNAME = CPAFIRMNAME + + def _mock_secondaryauditor_header(self): + """Returns a mock secondary_auditor with all necessary fields.""" + return [ + self.MockSecondaryAuditorHeader( + DBKEY="123456789", + CPAFIRMNAME="John Doe CPA Firm", + ), + self.MockSecondaryAuditorHeader( + DBKEY="223456789", + CPAFIRMNAME="Jack C CPA Firm", + ), + ] + + def test_valid_cpafirm(self): + """Test that the function does not change the valid CPAFIRMNAME.""" + secondary_auditors = self._mock_secondaryauditor_header() + cpas = secondary_auditors + xform_cpafirmname(secondary_auditors) + for index in range(len(secondary_auditors)): + self.assertEqual( + secondary_auditors[index].CPAFIRMNAME, cpas[index].CPAFIRMNAME + ) + + def test_blank_cpafirm(self): + """Test that the function changes blank CPAFIRMNAME to GSA_MIGRATION.""" + secondary_auditors = self._mock_secondaryauditor_header() + secondary_auditors[0].CPAFIRMNAME = "" + cpas = secondary_auditors + xform_cpafirmname(secondary_auditors) + self.assertEqual(secondary_auditors[0].CPAFIRMNAME, settings.GSA_MIGRATION) + self.assertEqual(secondary_auditors[1].CPAFIRMNAME, cpas[1].CPAFIRMNAME) + + +class TestXformPadContactPhoneWithNine(SimpleTestCase): + + class MockSecondaryAuditorHeader: + + def __init__( + self, + CPAPHONE, + ): + self.CPAPHONE = CPAPHONE + + def setUp(self): + self.secondary_auditors = [ + self.MockSecondaryAuditorHeader(CPAPHONE="12345"), + 
self.MockSecondaryAuditorHeader(CPAPHONE="999999999"), + self.MockSecondaryAuditorHeader(CPAPHONE="1234567890"), + self.MockSecondaryAuditorHeader(CPAPHONE="98765432"), + ] + + def test_pad_contact_phone(self): + xform_pad_contact_phone_with_nine(self.secondary_auditors) + + self.assertEqual(self.secondary_auditors[0].CPAPHONE, "12345") # No change + self.assertEqual( + self.secondary_auditors[1].CPAPHONE, "9999999999" + ) # Pad applied + self.assertEqual(self.secondary_auditors[2].CPAPHONE, "1234567890") # No change + self.assertEqual(self.secondary_auditors[3].CPAPHONE, "98765432") # No change + + def test_change_records(self): + secondary_auditors = [self.MockSecondaryAuditorHeader(CPAPHONE="999999999")] + change_records_before = len(InspectionRecord.change["secondary_auditor"]) + + xform_pad_contact_phone_with_nine(secondary_auditors) + + change_records_after = len(InspectionRecord.change["secondary_auditor"]) + self.assertEqual(change_records_after, change_records_before + 1) diff --git a/backend/census_historical_migration/test_shared_xforms.py b/backend/census_historical_migration/test_shared_xforms.py index 0cd7c37045..cc507c3fda 100644 --- a/backend/census_historical_migration/test_shared_xforms.py +++ b/backend/census_historical_migration/test_shared_xforms.py @@ -1,265 +1,265 @@ -from django.conf import settings -from django.test import SimpleTestCase - -from .base_field_maps import FormFieldInDissem, FormFieldMap -from .exception_utils import ( - DataMigrationError, - DataMigrationValueError, -) -from .sac_general_lib.utils import ( - create_json_from_db_object, - normalize_year_string_or_exit, -) -from .transforms.xform_remove_hyphen_and_pad_zip import xform_remove_hyphen_and_pad_zip -from .transforms.xform_string_to_bool import string_to_bool -from .transforms.xform_string_to_int import string_to_int -from .transforms.xform_string_to_string import string_to_string -from .transforms.xform_uppercase_y_or_n import uppercase_y_or_n - - -class TestCreateJsonFromDbObject(SimpleTestCase): - """Tests for the create_json_from_db_object function.""" - - class MockDBObject: - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - mappings = [ - FormFieldMap( - "first_field_name", "IN_DB_FIRST_FIELD", FormFieldInDissem, "", str - ), - FormFieldMap( - "second_field_name", "IN_DB_SECOND_FIELD", FormFieldInDissem, None, bool - ), - FormFieldMap("third_field_name", None, FormFieldInDissem, "default_value", str), - ] - - def test_normal_case(self): - """Test that the function returns the correct JSON.""" - db_obj = self.MockDBObject(IN_DB_FIRST_FIELD="John", IN_DB_SECOND_FIELD="Y") - expected_json = { - "first_field_name": "John", - "second_field_name": True, - "third_field_name": "default_value", - } - self.assertEqual( - create_json_from_db_object(db_obj, self.mappings), expected_json - ) - - def test_missing_field(self): - """ - Test the function for correct JSON output when a database field is missing and the mapping has no default value. - """ - # In this case, second_field_name is not returned because the mapping for this field has no default value. - # and the value of the database field `IN_DB_SECOND_FIELD` is missing. 
- # `third_field_name` is returned because it has a default value of `default_value` - db_obj = self.MockDBObject(IN_DB_FIRST_FIELD="John") - expected_json = { - "first_field_name": "John", - "third_field_name": "default_value", - } - self.assertEqual( - create_json_from_db_object(db_obj, self.mappings), expected_json - ) - - def test_none_value(self): - """Test the function for correct JSON output when a database field has a value of None.""" - db_obj = self.MockDBObject(IN_DB_FIRST_FIELD=None, IN_DB_SECOND_FIELD=None) - # In this case, second_field_name is not returned because the mapping for this field has no default value. - # and the value of the database field `IN_DB_SECOND_FIELD` is None. - # `third_field_name` is returned because it has a default value of `default_value` - # `first_field_name` is returned because it has a default value of `""`. - expected_json = {"third_field_name": "default_value"} - self.assertEqual( - create_json_from_db_object(db_obj, self.mappings), expected_json - ) - - def test_all_default_values(self): - """Test the function for correct JSON output when all database fields have default values.""" - db_obj = self.MockDBObject() - # In this case, second_field_name is not returned because the mapping for this field has no default value. - # and the value of the database field `IN_DB_SECOND_FIELD` is None. - # `third_field_name` is returned because it has a default value of `default_value` - # `first_field_name` is returned because it has a default value of `""`. - expected_json = {"first_field_name": "", "third_field_name": "default_value"} - self.assertEqual( - create_json_from_db_object(db_obj, self.mappings), expected_json - ) - - -class TestStringToBool(SimpleTestCase): - """Tests for the string_to_bool function.""" - - def test_valid_boolean_string(self): - self.assertTrue(string_to_bool("Y")) - self.assertTrue(string_to_bool("y")) - self.assertFalse(string_to_bool("N")) - self.assertFalse(string_to_bool("n")) - - def test_string_with_spaces(self): - self.assertTrue(string_to_bool(" Y ")) - self.assertFalse(string_to_bool(" n")) - - def test_empty_string(self): - with self.assertRaises(DataMigrationValueError): - string_to_bool("") - - def test_non_string_input(self): - with self.assertRaises(DataMigrationValueError): - string_to_bool(123) - with self.assertRaises(DataMigrationValueError): - string_to_bool(None) - with self.assertRaises(DataMigrationValueError): - string_to_bool(["True"]) - with self.assertRaises(DataMigrationValueError): - string_to_bool(True) - - def test_string_length_more_than_one(self): - with self.assertRaises(DataMigrationValueError): - string_to_bool("Yes") - with self.assertRaises(DataMigrationValueError): - string_to_bool("No") - - def test_invalid_single_character_string(self): - with self.assertRaises(DataMigrationValueError): - string_to_bool("A") - with self.assertRaises(DataMigrationValueError): - string_to_bool("Z") - - -class TestUppercaseYOrN(SimpleTestCase): - """Tests for the uppercase_y_or_n function.""" - - def test_valid_boolean_string(self): - self.assertEqual(uppercase_y_or_n("Y"), "Y") - self.assertEqual(uppercase_y_or_n("y"), "Y") - self.assertEqual(uppercase_y_or_n("N"), "N") - self.assertEqual(uppercase_y_or_n("n"), "N") - - def test_string_with_spaces(self): - self.assertEqual(uppercase_y_or_n(" Y "), "Y") - self.assertEqual(uppercase_y_or_n(" n"), "N") - - def test_empty_string(self): - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n("") - - def test_gsa_migration(self): - 
self.assertEqual( - uppercase_y_or_n(settings.GSA_MIGRATION), settings.GSA_MIGRATION - ) - - def test_non_string_input(self): - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n(123) - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n(None) - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n(["True"]) - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n(True) - - def test_string_length_more_than_one(self): - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n("Yes") - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n("No") - - def test_invalid_single_character_string(self): - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n("A") - with self.assertRaises(DataMigrationValueError): - uppercase_y_or_n("Z") - - -class TestStringToInt(SimpleTestCase): - """Tests for the string_to_int function.""" - - def test_valid_integer_strings(self): - self.assertEqual(string_to_int("123"), 123) - self.assertEqual(string_to_int("-456"), -456) - self.assertEqual(string_to_int(" 789 "), 789) - - def test_invalid_strings(self): - with self.assertRaises(DataMigrationValueError): - string_to_int("abc") - with self.assertRaises(DataMigrationValueError): - string_to_int("123abc") - with self.assertRaises(DataMigrationValueError): - string_to_int("12.34") - - def test_empty_string(self): - with self.assertRaises(DataMigrationValueError): - string_to_int("") - - def test_non_string_input(self): - with self.assertRaises(DataMigrationValueError): - string_to_int(123) - with self.assertRaises(DataMigrationValueError): - string_to_int(None) - with self.assertRaises(DataMigrationValueError): - string_to_int([123]) - - -class TestStringToString(SimpleTestCase): - def test_valid_strings(self): - self.assertEqual(string_to_string("hello"), "hello") - self.assertEqual(string_to_string(" world "), "world") - self.assertEqual(string_to_string(" space both sides "), "space both sides") - - def test_none_input(self): - self.assertEqual(string_to_string(None), "") - - def test_non_string_input(self): - with self.assertRaises(DataMigrationValueError): - string_to_string(123) - with self.assertRaises(DataMigrationValueError): - string_to_string(True) - with self.assertRaises(DataMigrationValueError): - string_to_string([1, 2, 3]) - - def test_string_with_only_spaces(self): - self.assertEqual(string_to_string(" "), "") - - -class TestNormalizeYearString(SimpleTestCase): - def test_valid_short_year(self): - self.assertEqual(normalize_year_string_or_exit("21"), "2021") - self.assertEqual(normalize_year_string_or_exit("16"), "2016") - - def test_valid_full_year(self): - self.assertEqual(normalize_year_string_or_exit("2018"), "2018") - - def test_invalid_year_string(self): - with self.assertRaises(SystemExit): - normalize_year_string_or_exit("invalid") - - def test_year_out_of_range(self): - with self.assertRaises(SystemExit): - normalize_year_string_or_exit("15") - with self.assertRaises(SystemExit): - normalize_year_string_or_exit("2023") - - -class TestXformAddHyphenToZip(SimpleTestCase): - def test_five_digit_zip(self): - self.assertEqual(xform_remove_hyphen_and_pad_zip("12345"), "12345") - - def test_nine_digit_zip(self): - self.assertEqual(xform_remove_hyphen_and_pad_zip("123456789"), "123456789") - - def test_hyphenated_zip(self): - self.assertEqual(xform_remove_hyphen_and_pad_zip("12345-6789"), "123456789") - self.assertEqual(xform_remove_hyphen_and_pad_zip("1234-6789"), "012346789") - - def 
test_four_digit_zip(self): - self.assertEqual(xform_remove_hyphen_and_pad_zip("1234"), "01234") - - def test_eight_digit_zip(self): - self.assertEqual(xform_remove_hyphen_and_pad_zip("12345678"), "012345678") - - def test_malformed_zip(self): - with self.assertRaises(DataMigrationError): - xform_remove_hyphen_and_pad_zip("123") - with self.assertRaises(DataMigrationError): - xform_remove_hyphen_and_pad_zip("1234-678") +from django.conf import settings +from django.test import SimpleTestCase + +from .base_field_maps import FormFieldInDissem, FormFieldMap +from .exception_utils import ( + DataMigrationError, + DataMigrationValueError, +) +from .sac_general_lib.utils import ( + create_json_from_db_object, + normalize_year_string_or_exit, +) +from .transforms.xform_remove_hyphen_and_pad_zip import xform_remove_hyphen_and_pad_zip +from .transforms.xform_string_to_bool import string_to_bool +from .transforms.xform_string_to_int import string_to_int +from .transforms.xform_string_to_string import string_to_string +from .transforms.xform_uppercase_y_or_n import uppercase_y_or_n + + +class TestCreateJsonFromDbObject(SimpleTestCase): + """Tests for the create_json_from_db_object function.""" + + class MockDBObject: + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + mappings = [ + FormFieldMap( + "first_field_name", "IN_DB_FIRST_FIELD", FormFieldInDissem, "", str + ), + FormFieldMap( + "second_field_name", "IN_DB_SECOND_FIELD", FormFieldInDissem, None, bool + ), + FormFieldMap("third_field_name", None, FormFieldInDissem, "default_value", str), + ] + + def test_normal_case(self): + """Test that the function returns the correct JSON.""" + db_obj = self.MockDBObject(IN_DB_FIRST_FIELD="John", IN_DB_SECOND_FIELD="Y") + expected_json = { + "first_field_name": "John", + "second_field_name": True, + "third_field_name": "default_value", + } + self.assertEqual( + create_json_from_db_object(db_obj, self.mappings), expected_json + ) + + def test_missing_field(self): + """ + Test the function for correct JSON output when a database field is missing and the mapping has no default value. + """ + # In this case, second_field_name is not returned because the mapping for this field has no default value. + # and the value of the database field `IN_DB_SECOND_FIELD` is missing. + # `third_field_name` is returned because it has a default value of `default_value` + db_obj = self.MockDBObject(IN_DB_FIRST_FIELD="John") + expected_json = { + "first_field_name": "John", + "third_field_name": "default_value", + } + self.assertEqual( + create_json_from_db_object(db_obj, self.mappings), expected_json + ) + + def test_none_value(self): + """Test the function for correct JSON output when a database field has a value of None.""" + db_obj = self.MockDBObject(IN_DB_FIRST_FIELD=None, IN_DB_SECOND_FIELD=None) + # In this case, second_field_name is not returned because the mapping for this field has no default value. + # and the value of the database field `IN_DB_SECOND_FIELD` is None. + # `third_field_name` is returned because it has a default value of `default_value` + # `first_field_name` is returned because it has a default value of `""`. 
+ expected_json = {"third_field_name": "default_value"} + self.assertEqual( + create_json_from_db_object(db_obj, self.mappings), expected_json + ) + + def test_all_default_values(self): + """Test the function for correct JSON output when all database fields have default values.""" + db_obj = self.MockDBObject() + # In this case, second_field_name is not returned because the mapping for this field has no default value. + # and the value of the database field `IN_DB_SECOND_FIELD` is None. + # `third_field_name` is returned because it has a default value of `default_value` + # `first_field_name` is returned because it has a default value of `""`. + expected_json = {"first_field_name": "", "third_field_name": "default_value"} + self.assertEqual( + create_json_from_db_object(db_obj, self.mappings), expected_json + ) + + +class TestStringToBool(SimpleTestCase): + """Tests for the string_to_bool function.""" + + def test_valid_boolean_string(self): + self.assertTrue(string_to_bool("Y")) + self.assertTrue(string_to_bool("y")) + self.assertFalse(string_to_bool("N")) + self.assertFalse(string_to_bool("n")) + + def test_string_with_spaces(self): + self.assertTrue(string_to_bool(" Y ")) + self.assertFalse(string_to_bool(" n")) + + def test_empty_string(self): + with self.assertRaises(DataMigrationValueError): + string_to_bool("") + + def test_non_string_input(self): + with self.assertRaises(DataMigrationValueError): + string_to_bool(123) + with self.assertRaises(DataMigrationValueError): + string_to_bool(None) + with self.assertRaises(DataMigrationValueError): + string_to_bool(["True"]) + with self.assertRaises(DataMigrationValueError): + string_to_bool(True) + + def test_string_length_more_than_one(self): + with self.assertRaises(DataMigrationValueError): + string_to_bool("Yes") + with self.assertRaises(DataMigrationValueError): + string_to_bool("No") + + def test_invalid_single_character_string(self): + with self.assertRaises(DataMigrationValueError): + string_to_bool("A") + with self.assertRaises(DataMigrationValueError): + string_to_bool("Z") + + +class TestUppercaseYOrN(SimpleTestCase): + """Tests for the uppercase_y_or_n function.""" + + def test_valid_boolean_string(self): + self.assertEqual(uppercase_y_or_n("Y"), "Y") + self.assertEqual(uppercase_y_or_n("y"), "Y") + self.assertEqual(uppercase_y_or_n("N"), "N") + self.assertEqual(uppercase_y_or_n("n"), "N") + + def test_string_with_spaces(self): + self.assertEqual(uppercase_y_or_n(" Y "), "Y") + self.assertEqual(uppercase_y_or_n(" n"), "N") + + def test_empty_string(self): + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n("") + + def test_gsa_migration(self): + self.assertEqual( + uppercase_y_or_n(settings.GSA_MIGRATION), settings.GSA_MIGRATION + ) + + def test_non_string_input(self): + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n(123) + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n(None) + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n(["True"]) + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n(True) + + def test_string_length_more_than_one(self): + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n("Yes") + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n("No") + + def test_invalid_single_character_string(self): + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n("A") + with self.assertRaises(DataMigrationValueError): + uppercase_y_or_n("Z") + + +class TestStringToInt(SimpleTestCase): 
+ """Tests for the string_to_int function.""" + + def test_valid_integer_strings(self): + self.assertEqual(string_to_int("123"), 123) + self.assertEqual(string_to_int("-456"), -456) + self.assertEqual(string_to_int(" 789 "), 789) + + def test_invalid_strings(self): + with self.assertRaises(DataMigrationValueError): + string_to_int("abc") + with self.assertRaises(DataMigrationValueError): + string_to_int("123abc") + with self.assertRaises(DataMigrationValueError): + string_to_int("12.34") + + def test_empty_string(self): + with self.assertRaises(DataMigrationValueError): + string_to_int("") + + def test_non_string_input(self): + with self.assertRaises(DataMigrationValueError): + string_to_int(123) + with self.assertRaises(DataMigrationValueError): + string_to_int(None) + with self.assertRaises(DataMigrationValueError): + string_to_int([123]) + + +class TestStringToString(SimpleTestCase): + def test_valid_strings(self): + self.assertEqual(string_to_string("hello"), "hello") + self.assertEqual(string_to_string(" world "), "world") + self.assertEqual(string_to_string(" space both sides "), "space both sides") + + def test_none_input(self): + self.assertEqual(string_to_string(None), "") + + def test_non_string_input(self): + with self.assertRaises(DataMigrationValueError): + string_to_string(123) + with self.assertRaises(DataMigrationValueError): + string_to_string(True) + with self.assertRaises(DataMigrationValueError): + string_to_string([1, 2, 3]) + + def test_string_with_only_spaces(self): + self.assertEqual(string_to_string(" "), "") + + +class TestNormalizeYearString(SimpleTestCase): + def test_valid_short_year(self): + self.assertEqual(normalize_year_string_or_exit("21"), "2021") + self.assertEqual(normalize_year_string_or_exit("16"), "2016") + + def test_valid_full_year(self): + self.assertEqual(normalize_year_string_or_exit("2018"), "2018") + + def test_invalid_year_string(self): + with self.assertRaises(SystemExit): + normalize_year_string_or_exit("invalid") + + def test_year_out_of_range(self): + with self.assertRaises(SystemExit): + normalize_year_string_or_exit("15") + with self.assertRaises(SystemExit): + normalize_year_string_or_exit("2023") + + +class TestXformAddHyphenToZip(SimpleTestCase): + def test_five_digit_zip(self): + self.assertEqual(xform_remove_hyphen_and_pad_zip("12345"), "12345") + + def test_nine_digit_zip(self): + self.assertEqual(xform_remove_hyphen_and_pad_zip("123456789"), "123456789") + + def test_hyphenated_zip(self): + self.assertEqual(xform_remove_hyphen_and_pad_zip("12345-6789"), "123456789") + self.assertEqual(xform_remove_hyphen_and_pad_zip("1234-6789"), "012346789") + + def test_four_digit_zip(self): + self.assertEqual(xform_remove_hyphen_and_pad_zip("1234"), "01234") + + def test_eight_digit_zip(self): + self.assertEqual(xform_remove_hyphen_and_pad_zip("12345678"), "012345678") + + def test_malformed_zip(self): + with self.assertRaises(DataMigrationError): + xform_remove_hyphen_and_pad_zip("123") + with self.assertRaises(DataMigrationError): + xform_remove_hyphen_and_pad_zip("1234-678") diff --git a/backend/census_historical_migration/throwaway_scripts/reprocess_migration_cli_commands.py b/backend/census_historical_migration/throwaway_scripts/reprocess_migration_cli_commands.py index f6e1ae1aac..09005e5f9f 100644 --- a/backend/census_historical_migration/throwaway_scripts/reprocess_migration_cli_commands.py +++ b/backend/census_historical_migration/throwaway_scripts/reprocess_migration_cli_commands.py @@ -1,36 +1,36 @@ -import argparse -import time - 
-from util import ( - trigger_migration_workflow, -) -import subprocess # nosec - -# This script is a one-off to reprocess data migration for a failed -# migration attempt associated with a specific error tag. -# Command is `python reprocess_migration_cli_commands.py year total_records pages_per_instance instances error_tag` -# `python reprocess_migration_cli_commands.py 2022 42000 5 80 invalid_email_error` - -parser = argparse.ArgumentParser( - description="Trigger data migration GitHub Actions through gh API calls" -) -parser.add_argument("year", type=int, help="Audit year") -parser.add_argument("total_records", type=int, help="Total records.") -parser.add_argument("pages_per_instance", type=int, help="Pages per instance.") -parser.add_argument("instances", type=int, help="How many parallel instances to run") -parser.add_argument("error_tag", type=str, help="Error tag to reprocess") -args = parser.parse_args() - - -if __name__ == "__main__": - cmds = trigger_migration_workflow( - args, workflow_name="failed-data-migration-reprocessor.yml" - ) - print(args) - for ndx, cmd in enumerate(cmds): - print(f"# Instance {ndx + 1}") - cmd.append("-f") - cmd.append(f"error_tag={args.error_tag}") - print(" ".join(cmd)) - subprocess.run(cmd) # nosec - time.sleep(15) +import argparse +import time + +from util import ( + trigger_migration_workflow, +) +import subprocess # nosec + +# This script is a one-off to reprocess data migration for a failed +# migration attempt associated with a specific error tag. +# Command is `python reprocess_migration_cli_commands.py year total_records pages_per_instance instances error_tag` +# `python reprocess_migration_cli_commands.py 2022 42000 5 80 invalid_email_error` + +parser = argparse.ArgumentParser( + description="Trigger data migration GitHub Actions through gh API calls" +) +parser.add_argument("year", type=int, help="Audit year") +parser.add_argument("total_records", type=int, help="Total records.") +parser.add_argument("pages_per_instance", type=int, help="Pages per instance.") +parser.add_argument("instances", type=int, help="How many parallel instances to run") +parser.add_argument("error_tag", type=str, help="Error tag to reprocess") +args = parser.parse_args() + + +if __name__ == "__main__": + cmds = trigger_migration_workflow( + args, workflow_name="failed-data-migration-reprocessor.yml" + ) + print(args) + for ndx, cmd in enumerate(cmds): + print(f"# Instance {ndx + 1}") + cmd.append("-f") + cmd.append(f"error_tag={args.error_tag}") + print(" ".join(cmd)) + subprocess.run(cmd) # nosec + time.sleep(15) diff --git a/backend/census_historical_migration/throwaway_scripts/start_process_cli_commands.py b/backend/census_historical_migration/throwaway_scripts/start_process_cli_commands.py index 7b8052a26e..b7be200650 100644 --- a/backend/census_historical_migration/throwaway_scripts/start_process_cli_commands.py +++ b/backend/census_historical_migration/throwaway_scripts/start_process_cli_commands.py @@ -1,54 +1,54 @@ -import argparse -import subprocess # nosec -import time - -from util import trigger_migration_workflow - -# This throwaway script spits out code that can be -# copy-pasted into a bash script, or directly into the command line. -# Run from within the GitHub repo, it should launch instances -# of the historic data migration code. - -parser = argparse.ArgumentParser( - description="Trigger data migration GitHub Actions through gh API calls."
-) -parser.add_argument("year", type=int, help="Year to run (2022, 2021, etc.)") -parser.add_argument("total_records", type=int, help="Total records.") -parser.add_argument("pages_per_instance", type=int, help="Pages per instance.") -parser.add_argument("instances", type=int, help="How many parallel istances to run") - -args = parser.parse_args() - - -if __name__ == "__main__": - cmds = trigger_migration_workflow(args) - for ndx, cmd in enumerate(cmds): - print(f"# Instance {ndx + 1}") - cmd = " ".join(cmd) - print(cmd) - subprocess.run(cmd) # nosec - time.sleep(15) - - -# Examples - -# With round numbers, it comes out nice and tidy. -# python start_process_cli_commands.py 2022 42000 5 80 -# Each instance must run 525 records. -# With 5 pages per instance, the page size is 105. -# There are 400 pages in total. -# This means we will attempt 42000 records. - -# Off-by-one, and we make sure we don't drop that extra. -# python start_process_cli_commands.py 2022 42001 5 80 -# Each instance must run 526 records. -# With 5 pages per instance, the page size is 106. -# There are 397 pages in total. -# This means we will attempt 42082 records. - -# More pages, and we get closer to the exact number. -# python start_process_cli_commands.py 2022 42001 10 80 -# Each instance must run 526 records. -# With 10 pages per instance, the page size is 53. -# There are 793 pages in total. -# This means we will attempt 42029 records. +import argparse +import subprocess # nosec +import time + +from util import trigger_migration_workflow + +# This throwaway script spits out code that can be +# copy-pasted into a bash script, or directly into the command line. +# Run from within the Github repo, it should launch instances +# of the historic data migration code. + +parser = argparse.ArgumentParser( + description="Trigger data migration Github Actions through gh API calls." +) +parser.add_argument("year", type=int, help="Year to run (2022, 2021, etc.)") +parser.add_argument("total_records", type=int, help="Total records.") +parser.add_argument("pages_per_instance", type=int, help="Pages per instance.") +parser.add_argument("instances", type=int, help="How many parallel istances to run") + +args = parser.parse_args() + + +if __name__ == "__main__": + cmds = trigger_migration_workflow(args) + for ndx, cmd in enumerate(cmds): + print(f"# Instance {ndx + 1}") + cmd = " ".join(cmd) + print(cmd) + subprocess.run(cmd) # nosec + time.sleep(15) + + +# Examples + +# With round numbers, it comes out nice and tidy. +# python start_process_cli_commands.py 2022 42000 5 80 +# Each instance must run 525 records. +# With 5 pages per instance, the page size is 105. +# There are 400 pages in total. +# This means we will attempt 42000 records. + +# Off-by-one, and we make sure we don't drop that extra. +# python start_process_cli_commands.py 2022 42001 5 80 +# Each instance must run 526 records. +# With 5 pages per instance, the page size is 106. +# There are 397 pages in total. +# This means we will attempt 42082 records. + +# More pages, and we get closer to the exact number. +# python start_process_cli_commands.py 2022 42001 10 80 +# Each instance must run 526 records. +# With 10 pages per instance, the page size is 53. +# There are 793 pages in total. +# This means we will attempt 42029 records. 
diff --git a/backend/census_historical_migration/throwaway_scripts/stop_process_cli_commands.py b/backend/census_historical_migration/throwaway_scripts/stop_process_cli_commands.py index 922adcb574..bf8e3ba334 100644 --- a/backend/census_historical_migration/throwaway_scripts/stop_process_cli_commands.py +++ b/backend/census_historical_migration/throwaway_scripts/stop_process_cli_commands.py @@ -1,91 +1,91 @@ -import argparse -import re -import subprocess # nosec -import sys - -# This script stops running tasks for a specified app space in Cloud Foundry. -# It identifies tasks by their ID. If no specific IDs are given, it will attempt to stop all currently running tasks. - - -def check_cf_environment(): - # Run 'cf target' and capture the output - result = subprocess.run(["cf", "target"], capture_output=True, text=True) # nosec - - # Check if the command was successful - if result.returncode != 0: - print("Error retrieving Cloud Foundry target information") - sys.exit(1) - - return "preview" in result.stdout - - -def get_running_task_ids(): - # Run `cf tasks gsa-fac` and capture the output - result = subprocess.run( - ["cf", "tasks", "gsa-fac"], capture_output=True, text=True - ) # nosec - - if result.returncode != 0: - print("Error running command") - return [] - - # Process the output - output = result.stdout - lines = output.split("\n") - running_tasks = [line for line in lines if "RUNNING" in line] - - # Extract IDs - task_ids = [] - for task in running_tasks: - match = re.match(r"(\d+)", task) - if match: - task_ids.append(match.group(1)) - - return task_ids - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Terminate running tasks.") - parser.add_argument( - "--start_id", type=int, help="Id of first instance", required=False - ) - parser.add_argument( - "--end_id", type=int, help="Id of last instance", required=False - ) - - args = parser.parse_args() - - if not check_cf_environment(): - print("This script can only be run in the PREVIEW environment.") - sys.exit(1) - - # If start_id and end_id are not provided - if args.start_id is None or args.end_id is None: - user_input = input("Terminate all running tasks? (Y/N): ") - if user_input.lower() == "y": - running_ids = get_running_task_ids() - print("Terminating Running task IDs:", running_ids) - for id in running_ids: - print(f"# Stopping process {id}") - cmd = ["cf", "terminate-task", "gsa-fac", id] - subprocess.run(cmd) # nosec - else: - for id in range(int(args.start_id), int(args.end_id) + 1): - # cf terminate-task gsa-fac - print(f"# Stopping process {id}") - cmd = [ - "cf", - "terminate-task", - "gsa-fac", - f"{id}", - ] - - print(" ".join(cmd)) - subprocess.run(cmd) # nosec - - -# Examples - -# python stop_process_cli_commands.py --start_id 20 --end_id 50 -# will stop all instances with IDs 20 through 50, inclusive. -# To see the running instance IDs, run `cf tasks gsa-fac` +import argparse +import re +import subprocess # nosec +import sys + +# This script stops running tasks for a specified app space in Cloud Foundry. +# It identifies tasks by their ID. If no specific IDs are given, it will attempt to stop all currently running tasks.
+ + +def check_cf_environment(): + # Run 'cf target' and capture the output + result = subprocess.run(["cf", "target"], capture_output=True, text=True) # nosec + + # Check if the command was successful + if result.returncode != 0: + print("Error retrieving Cloud Foundry target information") + sys.exit(1) + + return "preview" in result.stdout + + +def get_running_task_ids(): + # Run `cf tasks gsa-fac` and capture the output + result = subprocess.run( + ["cf", "tasks", "gsa-fac"], capture_output=True, text=True + ) # nosec + + if result.returncode != 0: + print("Error running command") + return [] + + # Process the output + output = result.stdout + lines = output.split("\n") + running_tasks = [line for line in lines if "RUNNING" in line] + + # Extract IDs + task_ids = [] + for task in running_tasks: + match = re.match(r"(\d+)", task) + if match: + task_ids.append(match.group(1)) + + return task_ids + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Terminate running tasks.") + parser.add_argument( + "--start_id", type=int, help="Id of first instance", required=False + ) + parser.add_argument( + "--end_id", type=int, help="Id of last instance", required=False + ) + + args = parser.parse_args() + + if not check_cf_environment(): + print("This script can only be run in the PREVIEW environment.") + sys.exit(1) + + # If start_id and end_id are not provided + if args.start_id is None or args.end_id is None: + user_input = input("Terminate all running tasks? (Y/N): ") + if user_input.lower() == "y": + running_ids = get_running_task_ids() + print("Terminating Running task IDs:", running_ids) + for id in running_ids: + print(f"# Stopping process {id}") + cmd = ["cf", "terminate-task", "gsa-fac", id] + subprocess.run(cmd) # nosec + else: + for id in range(int(args.start_id), int(args.end_id) + 1): + # cf terminate-task gsa-fac + print(f"# Stopping process {id}") + cmd = [ + "cf", + "terminate-task", + "gsa-fac", + f"{id}", + ] + + print(" ".join(cmd)) + subprocess.run(cmd) # nosec + + +# Examples + +# python stop_process_cli_commands.py --start_id 20 --end_id 50 +# will stop all instances with IDs 20 through 50, inclusive. +# To see the running instance IDs, run `cf tasks gsa-fac` diff --git a/backend/census_historical_migration/throwaway_scripts/util.py b/backend/census_historical_migration/throwaway_scripts/util.py index 7dce59ffed..e4abbc4170 100644 --- a/backend/census_historical_migration/throwaway_scripts/util.py +++ b/backend/census_historical_migration/throwaway_scripts/util.py @@ -1,50 +1,50 @@ -# Ceiling division -# We need to always round *up* so we don't miss any records. -import math - - -def cdiv(a, b): - return math.ceil(a / b) - - -def chunks(lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield list(map(str, lst[i : i + n])) - - -def trigger_migration_workflow( - args, workflow_name="historic-data-migrator-with-pagination.yml" -): - # The number of records per instance is the total records // instances, rounded up - recs_per_instance = cdiv(args.total_records, args.instances) - print(f"Each instance must run {recs_per_instance} records.") - page_size = cdiv(recs_per_instance, args.pages_per_instance) - print( - f"With {args.pages_per_instance} pages per instance, the page size is {page_size}." - ) - total_pages = cdiv(args.total_records, page_size) - print(f"There are {total_pages} pages in total.") - print(f"This means we will attempt {total_pages * page_size} records.") - # Run one extra page for good measure. This "rounds up."
- page_chunks = chunks(range(1, total_pages + 1), args.pages_per_instance) - cmds = [] - for ndx, page_set in enumerate(page_chunks): - # gh workflow run historic-data-migrator-with-pagination.yml -f environment=preview -f year=2022 -f page_size=1 -f pages=1 - cmds.append( - [ - "gh", - "workflow", - "run", - f"{workflow_name}", - "-f", - "environment=preview", - "-f", - f"year={args.year}", - "-f", - f"page_size={page_size}", - "-f", - "pages={}".format(",".join(page_set)), - ] - ) - return cmds +# Ceiling division +# We need to always round *up* so we don't miss any records. +import math + + +def cdiv(a, b): + return math.ceil(a / b) + + +def chunks(lst, n): + """Yield successive n-sized chunks from lst.""" + for i in range(0, len(lst), n): + yield list(map(str, lst[i : i + n])) + + +def trigger_migration_workflow( + args, workflow_name="historic-data-migrator-with-pagination.yml" +): + # The number of records per instance is the total records // instances, rounded up + recs_per_instance = cdiv(args.total_records, args.instances) + print(f"Each instance must run {recs_per_instance} records.") + page_size = cdiv(recs_per_instance, args.pages_per_instance) + print( + f"With {args.pages_per_instance} pages per instance, the page size is {page_size}." + ) + total_pages = cdiv(args.total_records, page_size) + print(f"There are {total_pages} pages in total.") + print(f"This means we will attempt {total_pages * page_size} records.") + # Run one extra page for good measure. This "rounds up." + page_chunks = chunks(range(1, total_pages + 1), args.pages_per_instance) + cmds = [] + for ndx, page_set in enumerate(page_chunks): + # gh workflow run historic-data-migrator-with-pagination.yml -f environment=preview -f year=2022 -f page_size=1 -f pages=1 + cmds.append( + [ + "gh", + "workflow", + "run", + f"{workflow_name}", + "-f", + "environment=preview", + "-f", + f"year={args.year}", + "-f", + f"page_size={page_size}", + "-f", + "pages={}".format(",".join(page_set)), + ] + ) + return cmds diff --git a/backend/census_historical_migration/transforms/xform_remove_hyphen_and_pad_zip.py b/backend/census_historical_migration/transforms/xform_remove_hyphen_and_pad_zip.py index c39663bf37..05eef94953 100644 --- a/backend/census_historical_migration/transforms/xform_remove_hyphen_and_pad_zip.py +++ b/backend/census_historical_migration/transforms/xform_remove_hyphen_and_pad_zip.py @@ -1,39 +1,39 @@ -from ..exception_utils import DataMigrationError -from .xform_string_to_string import string_to_string -from django.conf import settings - - -def xform_remove_hyphen_and_pad_zip(zip): - """ - Transform a ZIP code string by removing the hyphen if present and adding a leading zero when necessary. - - If the ZIP is settings.GSA_MIGRATION, return it - - If the ZIP code has a hyphen, remove that character. - - If the ZIP code has 4 or 8 digits, pads with a leading zero. - - Returns the ZIP code if it has 5 digits or 9 digits (after padding if needed). - - Raises an error for other cases. - """ - # Transformation to be documented.
- - if zip == settings.GSA_MIGRATION: - return zip - - strzip = string_to_string(zip) - if "-" in strzip: - parts = strzip.split("-") - if len(parts[1]) == 4: # Check if there are exactly four digits after hyphen - strzip = "".join(parts) - else: - raise DataMigrationError( - f"Zip code {strzip} is malformed.", - "invalid_zip", - ) - - if len(strzip) in [4, 8]: - strzip = "0" + strzip - if len(strzip) == 5 or len(strzip) == 9: - return strzip - else: - raise DataMigrationError( - f"Zip code {strzip} is malformed.", - "invalid_zip", - ) +from ..exception_utils import DataMigrationError +from .xform_string_to_string import string_to_string +from django.conf import settings + + +def xform_remove_hyphen_and_pad_zip(zip): + """ + Transform a ZIP code string by removing the hyphen if present and adding a leading zero when necessary. + - If the ZIP is settings.GSA_MIGRATION, return it + - If the ZIP code has a hyphen, remove that character. + - If the ZIP code has 4 or 8 digits, pads with a leading zero. + - Returns the ZIP code if it has 5 digits or 9 digits (after padding if needed). + - Raises an error for other cases. + """ + # Transformation to be documented. + + if zip == settings.GSA_MIGRATION: + return zip + + strzip = string_to_string(zip) + if "-" in strzip: + parts = strzip.split("-") + if len(parts[1]) == 4: # Check if there are exactly four digits after hyphen + strzip = "".join(parts) + else: + raise DataMigrationError( + f"Zip code {strzip} is malformed.", + "invalid_zip", + ) + + if len(strzip) in [4, 8]: + strzip = "0" + strzip + if len(strzip) == 5 or len(strzip) == 9: + return strzip + else: + raise DataMigrationError( + f"Zip code {strzip} is malformed.", + "invalid_zip", + ) diff --git a/backend/census_historical_migration/transforms/xform_retrieve_uei.py b/backend/census_historical_migration/transforms/xform_retrieve_uei.py index c6452cd872..05f419041d 100644 --- a/backend/census_historical_migration/transforms/xform_retrieve_uei.py +++ b/backend/census_historical_migration/transforms/xform_retrieve_uei.py @@ -1,12 +1,12 @@ -from django.conf import settings -from .xform_string_to_string import string_to_string - - -def xform_retrieve_uei(uei): - """Returns the stripped UEI if it exists, otherwise returns `GSA_MIGRATION`.""" - # Transformation to be documented. - str_uei = string_to_string(uei) - if not str_uei: - return settings.GSA_MIGRATION - - return str_uei +from django.conf import settings +from .xform_string_to_string import string_to_string + + +def xform_retrieve_uei(uei): + """Returns the stripped UEI if it exists, otherwise returns `GSA_MIGRATION`.""" + # Transformation to be documented.
+    str_uei = string_to_string(uei)
+    if not str_uei:
+        return settings.GSA_MIGRATION
+
+    return str_uei
diff --git a/backend/census_historical_migration/transforms/xform_string_to_bool.py b/backend/census_historical_migration/transforms/xform_string_to_bool.py
index c7ef05472c..16f9bbcc8f 100644
--- a/backend/census_historical_migration/transforms/xform_string_to_bool.py
+++ b/backend/census_historical_migration/transforms/xform_string_to_bool.py
@@ -1,25 +1,25 @@
-from ..exception_utils import DataMigrationValueError
-from django.conf import settings
-
-
-def string_to_bool(value):
-    """Converts a string to a boolean."""
-
-    if not isinstance(value, str):
-        raise DataMigrationValueError(
-            f"Expected string, got {type(value).__name__}",
-            "invalid_str_type",
-        )
-
-    new_value = value.strip().upper()
-
-    if new_value == settings.GSA_MIGRATION:
-        return new_value
-
-    if new_value not in ["Y", "N"]:
-        raise DataMigrationValueError(
-            f"Expected 'Y' or 'N', got '{value}'",
-            "invalid_y_or_n",
-        )
-
-    return new_value == "Y"
+from ..exception_utils import DataMigrationValueError
+from django.conf import settings
+
+
+def string_to_bool(value):
+    """Converts a 'Y'/'N' string to a boolean; the settings.GSA_MIGRATION sentinel is returned unchanged."""
+
+    if not isinstance(value, str):
+        raise DataMigrationValueError(
+            f"Expected string, got {type(value).__name__}",
+            "invalid_str_type",
+        )
+
+    new_value = value.strip().upper()
+
+    if new_value == settings.GSA_MIGRATION:
+        return new_value
+
+    if new_value not in ["Y", "N"]:
+        raise DataMigrationValueError(
+            f"Expected 'Y' or 'N', got '{value}'",
+            "invalid_y_or_n",
+        )
+
+    return new_value == "Y"
diff --git a/backend/census_historical_migration/transforms/xform_string_to_int.py b/backend/census_historical_migration/transforms/xform_string_to_int.py
index 5d8c7ec540..54f13b4a88 100644
--- a/backend/census_historical_migration/transforms/xform_string_to_int.py
+++ b/backend/census_historical_migration/transforms/xform_string_to_int.py
@@ -1,20 +1,20 @@
-from ..exception_utils import DataMigrationValueError
-
-
-def string_to_int(value):
-    """Converts a string to an integer."""
-
-    if not isinstance(value, str):
-        raise DataMigrationValueError(
-            f"Expected string, got {type(value).__name__}",
-            "invalid_str_type",
-        )
-    value = value.strip()
-    # Check if the string can be converted to an integer
-    try:
-        return int(value)
-    except ValueError:
-        raise DataMigrationValueError(
-            f"Cannot convert string to integer: '{value}'",
-            "str_to_int_conversion",
-        )
+from ..exception_utils import DataMigrationValueError
+
+
+def string_to_int(value):
+    """Converts a string to an integer."""
+
+    if not isinstance(value, str):
+        raise DataMigrationValueError(
+            f"Expected string, got {type(value).__name__}",
+            "invalid_str_type",
+        )
+    value = value.strip()
+    # Check if the string can be converted to an integer
+    try:
+        return int(value)
+    except ValueError:
+        raise DataMigrationValueError(
+            f"Cannot convert string to integer: '{value}'",
+            "str_to_int_conversion",
+        )
diff --git a/backend/census_historical_migration/transforms/xform_string_to_string.py b/backend/census_historical_migration/transforms/xform_string_to_string.py
index 01ceca7a4d..ad2b675025 100644
--- a/backend/census_historical_migration/transforms/xform_string_to_string.py
+++ b/backend/census_historical_migration/transforms/xform_string_to_string.py
@@ -1,16 +1,16 @@
-from ..exception_utils import DataMigrationValueError
-
-
-def string_to_string(value):
-    """
-    Converts a string to a trimmed string. Returns an empty string if the input
-    is None."""
-    if value is None:
-        return ""
-    elif not isinstance(value, str):
-        raise DataMigrationValueError(
-            f"Expected string, got {type(value).__name__}",
-            "invalid_str_type",
-        )
-
-    return value.strip()
+from ..exception_utils import DataMigrationValueError
+
+
+def string_to_string(value):
+    """
+    Converts a string to a trimmed string. Returns an empty string if the input
+    is None."""
+    if value is None:
+        return ""
+    elif not isinstance(value, str):
+        raise DataMigrationValueError(
+            f"Expected string, got {type(value).__name__}",
+            "invalid_str_type",
+        )
+
+    return value.strip()
diff --git a/backend/census_historical_migration/transforms/xform_uppercase_y_or_n.py b/backend/census_historical_migration/transforms/xform_uppercase_y_or_n.py
index 08e9e6857e..92e2a1e536 100644
--- a/backend/census_historical_migration/transforms/xform_uppercase_y_or_n.py
+++ b/backend/census_historical_migration/transforms/xform_uppercase_y_or_n.py
@@ -1,25 +1,25 @@
-from ..exception_utils import DataMigrationValueError
-
-from django.conf import settings
-
-
-def uppercase_y_or_n(value):
-    """Converts 'y' to 'Y' and 'n' to 'N', and allows GSA_MIGRATION"""
-
-    if not isinstance(value, str):
-        raise DataMigrationValueError(
-            f"Expected string, got {type(value).__name__}",
-            "uppercase_y_or_n",
-        )
-
-    if value == settings.GSA_MIGRATION:
-        return value
-
-    new_value = value.strip().upper()
-    if new_value not in ["Y", "N"]:
-        raise DataMigrationValueError(
-            f"Expected 'Y', 'y', 'N', 'n', or ${settings.GSA_MIGRATION}, got '{value}'",
-            "uppercase_y_or_n",
-        )
-
-    return new_value
+from ..exception_utils import DataMigrationValueError
+
+from django.conf import settings
+
+
+def uppercase_y_or_n(value):
+    """Converts 'y' to 'Y' and 'n' to 'N', and allows GSA_MIGRATION"""
+
+    if not isinstance(value, str):
+        raise DataMigrationValueError(
+            f"Expected string, got {type(value).__name__}",
+            "uppercase_y_or_n",
+        )
+
+    if value == settings.GSA_MIGRATION:
+        return value
+
+    new_value = value.strip().upper()
+    if new_value not in ["Y", "N"]:
+        raise DataMigrationValueError(
+            f"Expected 'Y', 'y', 'N', 'n', or {settings.GSA_MIGRATION}, got '{value}'",
+            "uppercase_y_or_n",
+        )
+
+    return new_value
diff --git a/backend/census_historical_migration/views.py b/backend/census_historical_migration/views.py
index 9713e9c601..7a02d1fd0d 100644
--- a/backend/census_historical_migration/views.py
+++ b/backend/census_historical_migration/views.py
@@ -1,3 +1,3 @@
-from django.shortcuts import render  # noqa: F401
-
-# Create your views here.
+from django.shortcuts import render  # noqa: F401
+
+# Create your views here.
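The transform helpers above all follow one contract: normalize the raw Census value, pass the settings.GSA_MIGRATION placeholder through untouched, and raise a DataMigrationError / DataMigrationValueError for anything else. A minimal sketch of that contract for the ZIP transform follows; pad_zip and the GSA_MIGRATION constant are illustrative stand-ins rather than code from this patch, and plain ValueError replaces the project's exception types:

# Illustrative sketch only -- not part of the patch.
GSA_MIGRATION = "GSA_MIGRATION"  # stand-in for settings.GSA_MIGRATION

def pad_zip(zip_code):
    # Simplified mirror of xform_remove_hyphen_and_pad_zip above.
    if zip_code == GSA_MIGRATION:
        return zip_code  # migration sentinel passes through untouched
    zip_code = zip_code.strip()
    if "-" in zip_code:
        head, _, tail = zip_code.partition("-")
        if len(tail) != 4:  # ZIP+4 needs exactly four digits after the hyphen
            raise ValueError(f"Zip code {zip_code} is malformed.")
        zip_code = head + tail
    if len(zip_code) in (4, 8):  # a leading zero was dropped upstream
        zip_code = "0" + zip_code
    if len(zip_code) not in (5, 9):
        raise ValueError(f"Zip code {zip_code} is malformed.")
    return zip_code

assert pad_zip("2138") == "02138"           # 4 digits -> padded to 5
assert pad_zip("4468-1234") == "044681234"  # hyphen removed, then padded to 9
assert pad_zip(GSA_MIGRATION) == GSA_MIGRATION

string_to_bool, string_to_int, and uppercase_y_or_n keep the same validate-or-raise shape, so a bad source value fails loudly with a tag ("invalid_zip", "invalid_y_or_n", ...) that identifies the offending transform during a migration run.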
diff --git a/backend/census_historical_migration/workbooklib/workbook_section_handlers.py b/backend/census_historical_migration/workbooklib/workbook_section_handlers.py
index f082f8621b..879927173e 100644
--- a/backend/census_historical_migration/workbooklib/workbook_section_handlers.py
+++ b/backend/census_historical_migration/workbooklib/workbook_section_handlers.py
@@ -1,34 +1,34 @@
-from audit.fixtures.excel import FORM_SECTIONS
-
-
-from ..workbooklib.notes_to_sefa import generate_notes_to_sefa
-from ..workbooklib.federal_awards import (
-    generate_federal_awards,
-)
-from ..workbooklib.findings import generate_findings
-from ..workbooklib.findings_text import generate_findings_text
-from ..workbooklib.corrective_action_plan import (
-    generate_corrective_action_plan,
-)
-from ..workbooklib.additional_ueis import (
-    generate_additional_ueis,
-)
-from ..workbooklib.additional_eins import (
-    generate_additional_eins,
-)
-from ..workbooklib.secondary_auditors import (
-    generate_secondary_auditors,
-)
-
-
-sections_to_handlers = {
-    FORM_SECTIONS.ADDITIONAL_EINS: generate_additional_eins,
-    FORM_SECTIONS.ADDITIONAL_UEIS: generate_additional_ueis,
-    FORM_SECTIONS.ADDITIONAL_UEIS: generate_additional_ueis,
-    FORM_SECTIONS.CORRECTIVE_ACTION_PLAN: generate_corrective_action_plan,
-    FORM_SECTIONS.FEDERAL_AWARDS: generate_federal_awards,
-    FORM_SECTIONS.FINDINGS_TEXT: generate_findings_text,
-    FORM_SECTIONS.FINDINGS_UNIFORM_GUIDANCE: generate_findings,
-    FORM_SECTIONS.NOTES_TO_SEFA: generate_notes_to_sefa,
-    FORM_SECTIONS.SECONDARY_AUDITORS: generate_secondary_auditors,
-}
+from audit.fixtures.excel import FORM_SECTIONS
+
+
+from ..workbooklib.notes_to_sefa import generate_notes_to_sefa
+from ..workbooklib.federal_awards import (
+    generate_federal_awards,
+)
+from ..workbooklib.findings import generate_findings
+from ..workbooklib.findings_text import generate_findings_text
+from ..workbooklib.corrective_action_plan import (
+    generate_corrective_action_plan,
+)
+from ..workbooklib.additional_ueis import (
+    generate_additional_ueis,
+)
+from ..workbooklib.additional_eins import (
+    generate_additional_eins,
+)
+from ..workbooklib.secondary_auditors import (
+    generate_secondary_auditors,
+)
+
+
+# Map each workbook section to its generator; the duplicate ADDITIONAL_UEIS key is dropped.
+sections_to_handlers = {
+    FORM_SECTIONS.ADDITIONAL_EINS: generate_additional_eins,
+    FORM_SECTIONS.ADDITIONAL_UEIS: generate_additional_ueis,
+    FORM_SECTIONS.CORRECTIVE_ACTION_PLAN: generate_corrective_action_plan,
+    FORM_SECTIONS.FEDERAL_AWARDS: generate_federal_awards,
+    FORM_SECTIONS.FINDINGS_TEXT: generate_findings_text,
+    FORM_SECTIONS.FINDINGS_UNIFORM_GUIDANCE: generate_findings,
+    FORM_SECTIONS.NOTES_TO_SEFA: generate_notes_to_sefa,
+    FORM_SECTIONS.SECONDARY_AUDITORS: generate_secondary_auditors,
+}
diff --git a/backend/config/db_url.py b/backend/config/db_url.py
index ce57d7bed7..d84419065d 100644
--- a/backend/config/db_url.py
+++ b/backend/config/db_url.py
@@ -1,17 +1,17 @@
-from django.core.exceptions import ImproperlyConfigured
-
-
-def get_db_url_from_vcap_services(
-    vcap,
-):
-    database_url = None
-    for db_service in vcap.get("aws-rds", []):
-        if db_service.get("instance_name") == "fac-db":
-            database_url = db_service["credentials"]["uri"]
-            break
-
-    if not database_url:
-        raise ImproperlyConfigured(
-            "Database URL is not properly configured. Expected 'fac-db' URL."
- ) - return database_url +from django.core.exceptions import ImproperlyConfigured + + +def get_db_url_from_vcap_services( + vcap, +): + database_url = None + for db_service in vcap.get("aws-rds", []): + if db_service.get("instance_name") == "fac-db": + database_url = db_service["credentials"]["uri"] + break + + if not database_url: + raise ImproperlyConfigured( + "Database URL is not properly configured. Expected 'fac-db' URL." + ) + return database_url diff --git a/backend/manifests/manifest-preview.yml b/backend/manifests/manifest-preview.yml index 2d51575d11..b29d59b53a 100644 --- a/backend/manifests/manifest-preview.yml +++ b/backend/manifests/manifest-preview.yml @@ -1,30 +1,30 @@ ---- -applications: - - name: ((app_name)) - buildpacks: - - https://github.com/cloudfoundry/apt-buildpack - - python_buildpack - memory: ((mem_amount)) - path: ../ - timeout: 180 - disk_quota: 2G - env: - ENV: ((cf_env_name)) - DJANGO_BASE_URL: https://((endpoint)) - ALLOWED_HOSTS: ((endpoint)) fac-((env_name)).app.cloud.gov - AV_SCAN_URL: https://fac-av-((service_name)).apps.internal:61443/scan - # DISABLE_COLLECTSTATIC: true - routes: - - route: fac-((env_name)).app.cloud.gov - instances: ((instances)) - services: - - fac-db - - fac-public-s3 - - fac-private-s3 - - fac-key-service - - newrelic-creds - - https-proxy-creds - - smtp-proxy-creds - - fac-logdrain - - fac-snapshot-db - - backups +--- +applications: + - name: ((app_name)) + buildpacks: + - https://github.com/cloudfoundry/apt-buildpack + - python_buildpack + memory: ((mem_amount)) + path: ../ + timeout: 180 + disk_quota: 2G + env: + ENV: ((cf_env_name)) + DJANGO_BASE_URL: https://((endpoint)) + ALLOWED_HOSTS: ((endpoint)) fac-((env_name)).app.cloud.gov + AV_SCAN_URL: https://fac-av-((service_name)).apps.internal:61443/scan + # DISABLE_COLLECTSTATIC: true + routes: + - route: fac-((env_name)).app.cloud.gov + instances: ((instances)) + services: + - fac-db + - fac-public-s3 + - fac-private-s3 + - fac-key-service + - newrelic-creds + - https-proxy-creds + - smtp-proxy-creds + - fac-logdrain + - fac-snapshot-db + - backups diff --git a/backend/manifests/vars/vars-staging.yml b/backend/manifests/vars/vars-staging.yml index f35a00a90b..a22428f51f 100644 --- a/backend/manifests/vars/vars-staging.yml +++ b/backend/manifests/vars/vars-staging.yml @@ -1,8 +1,8 @@ -app_name: gsa-fac -mem_amount: 4G -cf_env_name: STAGING -env_name: staging -service_name: staging -endpoint: fac-staging.app.cloud.gov -instances: 1 - +app_name: gsa-fac +mem_amount: 4G +cf_env_name: STAGING +env_name: staging +service_name: staging +endpoint: fac-staging.app.cloud.gov +instances: 1 + diff --git a/backend/report_submission/context_processors.py b/backend/report_submission/context_processors.py index fa2a0463d3..0650ae8522 100644 --- a/backend/report_submission/context_processors.py +++ b/backend/report_submission/context_processors.py @@ -1,5 +1,5 @@ -def certifiers_emails_must_not_match(request): - emails_must_not_match = ( - "The certifying auditor and auditee should not have the same email address" - ) - return {"certifiers_emails_must_not_match": emails_must_not_match} +def certifiers_emails_must_not_match(request): + emails_must_not_match = ( + "The certifying auditor and auditee should not have the same email address" + ) + return {"certifiers_emails_must_not_match": emails_must_not_match} diff --git a/backend/requirements.txt b/backend/requirements.txt index 30c4848328..87eca94878 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -1,1242 +1,1242 @@ 
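(A short aside before the requirements.txt pin list that follows.) The get_db_url_from_vcap_services helper above reads the standard Cloud Foundry service-binding structure that cloud.gov injects through the VCAP_SERVICES environment variable. A sketch of how it is typically fed; the JSON shape is the conventional binding format and the URI is a made-up placeholder, not values copied from this repository:

# Illustrative sketch only -- not part of the patch.
import json
import os

vcap = json.loads(os.environ.get("VCAP_SERVICES", "{}"))
# Expected shape of the binding the helper loops over:
# {"aws-rds": [{"instance_name": "fac-db",
#               "credentials": {"uri": "postgres://user:pw@host:5432/fac"}}]}
database_url = None
for db_service in vcap.get("aws-rds", []):
    if db_service.get("instance_name") == "fac-db":  # skip other RDS bindings
        database_url = db_service["credentials"]["uri"]
        break

Matching on instance_name rather than taking the first aws-rds entry matters here because the manifest above binds more than one database service (fac-db and fac-snapshot-db).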
-# -# This file is autogenerated by pip-compile with Python 3.12 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes --output-file=requirements.txt ./requirements/requirements.in -# -annotated-types==0.6.0 \ - --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ - --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d - # via pydantic -appdirs==1.4.4 \ - --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \ - --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 - # via fs -asgiref==3.8.1 \ - --hash=sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47 \ - --hash=sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590 - # via - # django - # django-cors-headers -attrs==23.2.0 \ - --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ - --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 - # via - # jsonschema - # referencing -boto3==1.34.83 \ - --hash=sha256:33cf93f6de5176f1188c923f4de1ae149ed723b89ed12e434f2b2f628491769e \ - --hash=sha256:9733ce811bd82feab506ad9309e375a79cabe8c6149061971c17754ce8997551 - # via - # -r ./requirements/requirements.in - # django-storages -botocore==1.34.83 \ - --hash=sha256:0a3fbbe018416aeefa8978454fb0b8129adbaf556647b72269bf02e4bf1f4161 \ - --hash=sha256:0f302aa76283d4df62b4fbb6d3d20115c1a8957fc02171257fc93904d69d5636 - # via - # boto3 - # s3transfer -certifi==2024.2.2 \ - --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \ - --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1 - # via requests -cfenv==0.5.3 \ - --hash=sha256:7815bffcc4a3db350f92517157fafc577c11b5a7ff172dc5632f1042b93073e8 \ - --hash=sha256:c7a91a4c82431acfc35db664c194d5e6cc7f4df3dcb692d0f836a6ceb0156167 - # via -r ./requirements/requirements.in -cffi==1.16.0 \ - --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ - --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ - --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ - --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ - --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ - --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ - --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ - --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ - --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ - --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ - --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ - --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ - --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ - --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ - --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ - --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ - --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ - --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ - --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ - 
--hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ - --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ - --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ - --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ - --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ - --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ - --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ - --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ - --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ - --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ - --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ - --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ - --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ - --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ - --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ - --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ - --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ - --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ - --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ - --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ - --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ - --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ - --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ - --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ - --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ - --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ - --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ - --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ - --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ - --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ - --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ - --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ - --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 - # via - # cryptography - # gevent -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - 
--hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - 
--hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 - # via requests -cryptography==42.0.5 \ - --hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ - --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ - --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ - 
--hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ - --hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ - --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ - --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ - --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ - --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ - --hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ - --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ - --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ - --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ - --hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ - --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ - --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ - --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ - --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ - --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ - --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ - --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ - --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ - --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ - --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ - --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ - --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ - --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ - --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ - --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ - --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ - --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ - --hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 - # via - # -r ./requirements/requirements.in - # oic -defusedxml==0.7.1 \ - --hash=sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69 \ - --hash=sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61 - # via oic -dj-database-url==2.1.0 \ - --hash=sha256:04bc34b248d4c21aaa13e4ab419ae6575ef5f10f3df735ce7da97722caa356e0 \ - --hash=sha256:f2042cefe1086e539c9da39fad5ad7f61173bf79665e69bf7e4de55fa88b135f - # via environs -dj-email-url==1.0.6 \ - --hash=sha256:55ffe3329e48f54f8a75aa36ece08f365e09d61f8a209773ef09a1d4760e699a \ - --hash=sha256:cbd08327fbb08b104eac160fb4703f375532e4c0243eb230f5b960daee7a96db - # via environs -django==5.1 \ - --hash=sha256:848a5980e8efb76eea70872fb0e4bc5e371619c70fffbe48e3e1b50b2c09455d \ - --hash=sha256:d3b811bf5371a26def053d7ee42a9df1267ef7622323fe70a601936725aa4557 - # via - # -r ./requirements/requirements.in - # dj-database-url - # django-cors-headers - # django-csp - # django-dbbackup - # django-storages - # djangorestframework - # djangorestframework-simplejwt -django-cache-url==3.4.5 \ - --hash=sha256:5f350759978483ab85dc0e3e17b3d53eed3394a28148f6bf0f53d11d0feb5b3c 
\ - --hash=sha256:eb9fb194717524348c95cad9905b70b647452741c1d9e481fac6d2125f0ad917 - # via environs -django-cors-headers==4.3.1 \ - --hash=sha256:0b1fd19297e37417fc9f835d39e45c8c642938ddba1acce0c1753d3edef04f36 \ - --hash=sha256:0bf65ef45e606aff1994d35503e6b677c0b26cafff6506f8fd7187f3be840207 - # via -r ./requirements/requirements.in -django-csp==3.8 \ - --hash=sha256:19b2978b03fcd73517d7d67acbc04fbbcaec0facc3e83baa502965892d1e0719 \ - --hash=sha256:ef0f1a9f7d8da68ae6e169c02e9ac661c0ecf04db70e0d1d85640512a68471c0 - # via -r ./requirements/requirements.in -django-dbbackup==4.1.0 \ - --hash=sha256:c411d38d0f8e60ab3254017278c14ebd75d4001b5634fc73be7fbe8a5260583b \ - --hash=sha256:c539b5246b429a22a8efadbab3719ee6b8eda45c66c4ff6592056c590d51c782 - # via -r ./requirements/requirements.in -django-fsm==3.0.0 \ - --hash=sha256:0112bcac573ad14051cf8ebe73bf296b6d5409f093e5f1677eb16e2196e263b3 \ - --hash=sha256:fa28f84f47eae7ce9247585ac6c1895e4ada08efff93fb243a59e9ff77b2d4ec - # via -r ./requirements/requirements.in -django-storages[boto3]==1.14.2 \ - --hash=sha256:1db759346b52ada6c2efd9f23d8241ecf518813eb31db9e2589207174f58f6ad \ - --hash=sha256:51b36af28cc5813b98d5f3dfe7459af638d84428c8df4a03990c7d74d1bea4e5 - # via -r ./requirements/requirements.in -djangorestframework==3.15.2 \ - --hash=sha256:2b8871b062ba1aefc2de01f773875441a961fefbf79f5eed1e32b2f096944b20 \ - --hash=sha256:36fe88cd2d6c6bec23dca9804bab2ba5517a8bb9d8f47ebc68981b56840107ad - # via - # -r ./requirements/requirements.in - # djangorestframework-simplejwt -djangorestframework-simplejwt==5.3.1 \ - --hash=sha256:381bc966aa46913905629d472cd72ad45faa265509764e20ffd440164c88d220 \ - --hash=sha256:6c4bd37537440bc439564ebf7d6085e74c5411485197073f508ebdfa34bc9fae - # via -r ./requirements/requirements.in -environs[django]==11.0.0 \ - --hash=sha256:069727a8f73d8ba8d033d3cd95c0da231d44f38f1da773bf076cef168d312ee8 \ - --hash=sha256:e0bcfd41c718c07a7db422f9109e490746450da38793fe4ee197f397b9343435 - # via -r ./requirements/requirements.in -et-xmlfile==1.1.0 \ - --hash=sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c \ - --hash=sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada - # via openpyxl -faker==24.9.0 \ - --hash=sha256:73b1e7967b0ceeac42fc99a8c973bb49e4499cc4044d20d17ab661d5cb7eda1d \ - --hash=sha256:97c7874665e8eb7b517f97bf3b59f03bf3f07513fe2c159e98b6b9ea6b9f2b3d - # via -r ./requirements/requirements.in -fs==2.4.16 \ - --hash=sha256:660064febbccda264ae0b6bace80a8d1be9e089e0a5eb2427b7d517f9a91545c \ - --hash=sha256:ae97c7d51213f4b70b6a958292530289090de3a7e15841e108fbe144f069d313 - # via -r ./requirements/requirements.in -furl==2.1.3 \ - --hash=sha256:5a6188fe2666c484a12159c18be97a1977a71d632ef5bb867ef15f54af39cc4e \ - --hash=sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0 - # via cfenv -future==1.0.0 \ - --hash=sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216 \ - --hash=sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05 - # via pyjwkest -gevent==24.2.1 \ - --hash=sha256:03aa5879acd6b7076f6a2a307410fb1e0d288b84b03cdfd8c74db8b4bc882fc5 \ - --hash=sha256:117e5837bc74a1673605fb53f8bfe22feb6e5afa411f524c835b2ddf768db0de \ - --hash=sha256:141a2b24ad14f7b9576965c0c84927fc85f824a9bb19f6ec1e61e845d87c9cd8 \ - --hash=sha256:14532a67f7cb29fb055a0e9b39f16b88ed22c66b96641df8c04bdc38c26b9ea5 \ - --hash=sha256:1dffb395e500613e0452b9503153f8f7ba587c67dd4a85fc7cd7aa7430cb02cc \ - 
--hash=sha256:2955eea9c44c842c626feebf4459c42ce168685aa99594e049d03bedf53c2800 \ - --hash=sha256:2ae3a25ecce0a5b0cd0808ab716bfca180230112bb4bc89b46ae0061d62d4afe \ - --hash=sha256:2e9ac06f225b696cdedbb22f9e805e2dd87bf82e8fa5e17756f94e88a9d37cf7 \ - --hash=sha256:368a277bd9278ddb0fde308e6a43f544222d76ed0c4166e0d9f6b036586819d9 \ - --hash=sha256:3adfb96637f44010be8abd1b5e73b5070f851b817a0b182e601202f20fa06533 \ - --hash=sha256:3d5325ccfadfd3dcf72ff88a92fb8fc0b56cacc7225f0f4b6dcf186c1a6eeabc \ - --hash=sha256:432fc76f680acf7cf188c2ee0f5d3ab73b63c1f03114c7cd8a34cebbe5aa2056 \ - --hash=sha256:44098038d5e2749b0784aabb27f1fcbb3f43edebedf64d0af0d26955611be8d6 \ - --hash=sha256:5a1df555431f5cd5cc189a6ee3544d24f8c52f2529134685f1e878c4972ab026 \ - --hash=sha256:6c47ae7d1174617b3509f5d884935e788f325eb8f1a7efc95d295c68d83cce40 \ - --hash=sha256:6f947a9abc1a129858391b3d9334c45041c08a0f23d14333d5b844b6e5c17a07 \ - --hash=sha256:782a771424fe74bc7e75c228a1da671578c2ba4ddb2ca09b8f959abdf787331e \ - --hash=sha256:7899a38d0ae7e817e99adb217f586d0a4620e315e4de577444ebeeed2c5729be \ - --hash=sha256:7b00f8c9065de3ad226f7979154a7b27f3b9151c8055c162332369262fc025d8 \ - --hash=sha256:8f4b8e777d39013595a7740b4463e61b1cfe5f462f1b609b28fbc1e4c4ff01e5 \ - --hash=sha256:90cbac1ec05b305a1b90ede61ef73126afdeb5a804ae04480d6da12c56378df1 \ - --hash=sha256:918cdf8751b24986f915d743225ad6b702f83e1106e08a63b736e3a4c6ead789 \ - --hash=sha256:9202f22ef811053077d01f43cc02b4aaf4472792f9fd0f5081b0b05c926cca19 \ - --hash=sha256:94138682e68ec197db42ad7442d3cf9b328069c3ad8e4e5022e6b5cd3e7ffae5 \ - --hash=sha256:968581d1717bbcf170758580f5f97a2925854943c45a19be4d47299507db2eb7 \ - --hash=sha256:9d8d0642c63d453179058abc4143e30718b19a85cbf58c2744c9a63f06a1d388 \ - --hash=sha256:a7ceb59986456ce851160867ce4929edaffbd2f069ae25717150199f8e1548b8 \ - --hash=sha256:b9913c45d1be52d7a5db0c63977eebb51f68a2d5e6fd922d1d9b5e5fd758cc98 \ - --hash=sha256:bde283313daf0b34a8d1bab30325f5cb0f4e11b5869dbe5bc61f8fe09a8f66f3 \ - --hash=sha256:bf5b9c72b884c6f0c4ed26ef204ee1f768b9437330422492c319470954bc4cc7 \ - --hash=sha256:ca80b121bbec76d7794fcb45e65a7eca660a76cc1a104ed439cdbd7df5f0b060 \ - --hash=sha256:cdf66977a976d6a3cfb006afdf825d1482f84f7b81179db33941f2fc9673bb1d \ - --hash=sha256:d4faf846ed132fd7ebfbbf4fde588a62d21faa0faa06e6f468b7faa6f436b661 \ - --hash=sha256:d7f87c2c02e03d99b95cfa6f7a776409083a9e4d468912e18c7680437b29222c \ - --hash=sha256:dd23df885318391856415e20acfd51a985cba6919f0be78ed89f5db9ff3a31cb \ - --hash=sha256:f5de3c676e57177b38857f6e3cdfbe8f38d1cd754b63200c0615eaa31f514b4f \ - --hash=sha256:f5e8e8d60e18d5f7fd49983f0c4696deeddaf6e608fbab33397671e2fcc6cc91 \ - --hash=sha256:f7cac622e11b4253ac4536a654fe221249065d9a69feb6cdcd4d9af3503602e0 \ - --hash=sha256:f8a04cf0c5b7139bc6368b461257d4a757ea2fe89b3773e494d235b7dd51119f \ - --hash=sha256:f8bb35ce57a63c9a6896c71a285818a3922d8ca05d150fd1fe49a7f57287b836 \ - --hash=sha256:fbfdce91239fe306772faab57597186710d5699213f4df099d1612da7320d682 - # via gunicorn -greenlet==3.0.3 \ - --hash=sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67 \ - --hash=sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6 \ - --hash=sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257 \ - --hash=sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4 \ - --hash=sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676 \ - --hash=sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61 \ - 
--hash=sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc \ - --hash=sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca \ - --hash=sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7 \ - --hash=sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728 \ - --hash=sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305 \ - --hash=sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6 \ - --hash=sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379 \ - --hash=sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414 \ - --hash=sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04 \ - --hash=sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a \ - --hash=sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf \ - --hash=sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491 \ - --hash=sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559 \ - --hash=sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e \ - --hash=sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274 \ - --hash=sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb \ - --hash=sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b \ - --hash=sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9 \ - --hash=sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b \ - --hash=sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be \ - --hash=sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506 \ - --hash=sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405 \ - --hash=sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113 \ - --hash=sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f \ - --hash=sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5 \ - --hash=sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230 \ - --hash=sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d \ - --hash=sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f \ - --hash=sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a \ - --hash=sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e \ - --hash=sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61 \ - --hash=sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6 \ - --hash=sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d \ - --hash=sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71 \ - --hash=sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22 \ - --hash=sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2 \ - --hash=sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3 \ - --hash=sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067 \ - --hash=sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc \ - --hash=sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881 \ - --hash=sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3 \ - --hash=sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e \ - 
--hash=sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac \ - --hash=sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53 \ - --hash=sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0 \ - --hash=sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b \ - --hash=sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83 \ - --hash=sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41 \ - --hash=sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c \ - --hash=sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf \ - --hash=sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da \ - --hash=sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33 - # via - # -r ./requirements/requirements.in - # gevent - # sqlalchemy -gunicorn[gevent]==22.0.0 \ - --hash=sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9 \ - --hash=sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63 - # via -r ./requirements/requirements.in -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 - # via requests -jmespath==1.0.1 \ - --hash=sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980 \ - --hash=sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe - # via - # boto3 - # botocore -jsonpath-ng==1.6.1 \ - --hash=sha256:086c37ba4917304850bd837aeab806670224d3f038fe2833ff593a672ef0a5fa \ - --hash=sha256:8f22cd8273d7772eea9aaa84d922e0841aa36fdb8a2c6b7f6c3791a16a9bc0be - # via -r ./requirements/requirements.in -jsonschema==4.21.1 \ - --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \ - --hash=sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5 - # via -r ./requirements/requirements.in -jsonschema-specifications==2023.12.1 \ - --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ - --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c - # via jsonschema -mako==1.3.3 \ - --hash=sha256:5324b88089a8978bf76d1629774fcc2f1c07b82acdf00f4c5dd8ceadfffc4b40 \ - --hash=sha256:e16c01d9ab9c11f7290eef1cfefc093fb5a45ee4a3da09e2fec2e4d1bae54e73 - # via oic -markupsafe==2.1.5 \ - --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ - --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ - --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ - --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ - --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ - --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ - --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ - --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ - --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ - --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ - --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ - --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ - --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ - 
--hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ - --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ - --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ - --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ - --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ - --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ - --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ - --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ - --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ - --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ - --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ - --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ - --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ - --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ - --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ - --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ - --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ - --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ - --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ - --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ - --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ - --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ - --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ - --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ - --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ - --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ - --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ - --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ - --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ - --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ - --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ - --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ - --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ - --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ - --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ - --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ - --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ - --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ - --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ - --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ - --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ - --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ - 
--hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ - --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ - --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ - --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ - --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 - # via mako -marshmallow==3.21.1 \ - --hash=sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3 \ - --hash=sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633 - # via environs -newrelic==9.11.0 \ - --hash=sha256:02eab15af4a08b870bcfdbc56390ecbb9dcacd144fe77f39a26d1be207bd30f0 \ - --hash=sha256:0d43a0891bf71333f6a6253cf87dea2c9009e22699a2acfd93608125a33b1936 \ - --hash=sha256:10cb7f7a78c49580602b90f367f3378264e495f2f3706734f88ced7e7ca9b033 \ - --hash=sha256:11653fd14f55999c5058b4dde8c721833076c0bd3efe668296725a622e9e7de8 \ - --hash=sha256:1f6e1bb0df8ff2b54195baac41fddc0e15ea1bdf1deb6af49153487696355181 \ - --hash=sha256:2010ed2793294a7e3c1057ec301d48997ed05dcef114d4c25120ac771f66bac1 \ - --hash=sha256:21e7b52d5b214bba3534ced166e6ec991117772815020bec38b0571fdcecbaf4 \ - --hash=sha256:2b02139458aefba86a4572cb8214f91a942103d24d5502395f64d6d7a4ad3f25 \ - --hash=sha256:3283885bcf31d9cbf8facb0004508a4eaa652a62471e0b724d26f9738a291979 \ - --hash=sha256:34b25d1beaf19825409f3d915a5bafa87b7b9230415821422be1e78e988750b7 \ - --hash=sha256:35d08587e694f5c517e55fb7119f924c64569d2e7ec4968ef761fc1f7bd1f40c \ - --hash=sha256:553674a66ef2c2206852b415b74e3c2fb7ed2b92e9800b68394d577f6aa1133e \ - --hash=sha256:5f477cdda9b998205084b822089b3ee4a8a2d9cd66b6f12487c9f9002566c5cb \ - --hash=sha256:6ceac1d8f13da38fa1b41c8202a91d3b4345e06adb655deaae0df08911fda56f \ - --hash=sha256:72dd3eb190c62bb54aa59029f0d6ac1420c2050b3aaf88d947fc7f62ec58d97f \ - --hash=sha256:76eb4cc599645a38a459b0002696d9c84844fecb02cf07bc18a4a91f737e438e \ - --hash=sha256:7c073f4c26539d6d74fbf4bac7f5046cac578975fb2cf77b156f802f1b39835e \ - --hash=sha256:7f1e473eb0505cb91ab9a4155321eabe13a2f6b93fb3c41d6f10e5486276be60 \ - --hash=sha256:8664e3b9e6ee0f78806b0cf7c90656a1a86d13232c2e0be18a1b1eb452f3f5d1 \ - --hash=sha256:87670d872c3abc36203e10f93d266c8f36ad2bd06fb54e790001a409f9e2f40f \ - --hash=sha256:94369792d61ccf21469c35cf66886c32350a180d8e782c0d28ec66411db29474 \ - --hash=sha256:9f95eb366ff714bce32476d256551b853247a72398ec46a89148ef5108509aa8 \ - --hash=sha256:b33539345c7cf349b65a176a30ab38e2998b071512a7450f5c5b89ac6c097006 \ - --hash=sha256:b5d2d0814e1aa9de5bd55797ff8c426d98200ba46ca14dbca15557d0f17cfb4e \ - --hash=sha256:b7903ba71ce5a4b2840f6d3c63ecd0fb3a018d2aceb915b48133c13c4a60185f \ - --hash=sha256:bc5c1b8a51946f64c34fc5fa29ce0221c4927a65c7f4435b3b8adeb29b9812d2 \ - --hash=sha256:d88fa17a515fb002eb14570800e4bfa69ac87ac27e6e2a96bc2bc9b60c80057a \ - --hash=sha256:dcec4173cd0f83420e6f61f92955065f1d460075af5e5bf88a5fea746e3cc180 \ - --hash=sha256:ffc0d8d490de0f12df70db637481aaadb8a43fb6d71ba8866dc14242aa5edad4 - # via -r ./requirements/requirements.in -numpy==1.26.4 \ - --hash=sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b \ - --hash=sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818 \ - --hash=sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20 \ - --hash=sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0 \ - --hash=sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010 \ - 
--hash=sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a \ - --hash=sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea \ - --hash=sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c \ - --hash=sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71 \ - --hash=sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110 \ - --hash=sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be \ - --hash=sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a \ - --hash=sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a \ - --hash=sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5 \ - --hash=sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed \ - --hash=sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd \ - --hash=sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c \ - --hash=sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e \ - --hash=sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0 \ - --hash=sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c \ - --hash=sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a \ - --hash=sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b \ - --hash=sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0 \ - --hash=sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6 \ - --hash=sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2 \ - --hash=sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a \ - --hash=sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30 \ - --hash=sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218 \ - --hash=sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5 \ - --hash=sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07 \ - --hash=sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2 \ - --hash=sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4 \ - --hash=sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764 \ - --hash=sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef \ - --hash=sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3 \ - --hash=sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f - # via pandas -oic==1.6.1 \ - --hash=sha256:385a1f64bb59519df1e23840530921bf416740240f505ea6d161e331d3d39fad \ - --hash=sha256:fcbf948a22e4d4df66f6bf57d327933f32a7b539640d9b42883457634360ba78 - # via -r ./requirements/requirements.in -openpyxl==3.1.2 \ - --hash=sha256:a6f5977418eff3b2d5500d54d9db50c8277a368436f4e4f8ddb1be3422870184 \ - --hash=sha256:f91456ead12ab3c6c2e9491cf33ba6d08357d802192379bb482f1033ade496f5 - # via -r ./requirements/requirements.in -orderedmultidict==1.0.1 \ - --hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \ - --hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3 - # via furl -packaging==24.0 \ - --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ - --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 - # via - # gunicorn - # marshmallow -pandas==2.2.2 \ - --hash=sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863 \ - 
--hash=sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2 \ - --hash=sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1 \ - --hash=sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad \ - --hash=sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db \ - --hash=sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76 \ - --hash=sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51 \ - --hash=sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32 \ - --hash=sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08 \ - --hash=sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b \ - --hash=sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4 \ - --hash=sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921 \ - --hash=sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288 \ - --hash=sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee \ - --hash=sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0 \ - --hash=sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24 \ - --hash=sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99 \ - --hash=sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151 \ - --hash=sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce \ - --hash=sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57 \ - --hash=sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef \ - --hash=sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54 \ - --hash=sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a \ - --hash=sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23 \ - --hash=sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772 \ - --hash=sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce \ - --hash=sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad - # via -r ./requirements/requirements.in -peewee==3.17.1 \ - --hash=sha256:e009ac4227c4fdc0058a56e822ad5987684f0a1fbb20fed577200785102581c3 - # via -r ./requirements/requirements.in -ply==3.11 \ - --hash=sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3 \ - --hash=sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce - # via jsonpath-ng -psycogreen==1.0.2 \ - --hash=sha256:c429845a8a49cf2f76b71265008760bcd7c7c77d80b806db4dc81116dbcd130d - # via -r ./requirements/requirements.in -psycopg2-binary==2.9.9 \ - --hash=sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9 \ - --hash=sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77 \ - --hash=sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e \ - --hash=sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84 \ - --hash=sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3 \ - --hash=sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2 \ - --hash=sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67 \ - --hash=sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876 \ - --hash=sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152 \ - --hash=sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f \ - 
--hash=sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a \ - --hash=sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6 \ - --hash=sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503 \ - --hash=sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f \ - --hash=sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493 \ - --hash=sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996 \ - --hash=sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f \ - --hash=sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e \ - --hash=sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59 \ - --hash=sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94 \ - --hash=sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7 \ - --hash=sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682 \ - --hash=sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420 \ - --hash=sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae \ - --hash=sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291 \ - --hash=sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe \ - --hash=sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980 \ - --hash=sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93 \ - --hash=sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692 \ - --hash=sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119 \ - --hash=sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716 \ - --hash=sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472 \ - --hash=sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b \ - --hash=sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2 \ - --hash=sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc \ - --hash=sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c \ - --hash=sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5 \ - --hash=sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab \ - --hash=sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984 \ - --hash=sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9 \ - --hash=sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf \ - --hash=sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0 \ - --hash=sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f \ - --hash=sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212 \ - --hash=sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb \ - --hash=sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be \ - --hash=sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90 \ - --hash=sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041 \ - --hash=sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7 \ - --hash=sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860 \ - --hash=sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d \ - --hash=sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245 \ - 
--hash=sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27 \ - --hash=sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417 \ - --hash=sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359 \ - --hash=sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202 \ - --hash=sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0 \ - --hash=sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7 \ - --hash=sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba \ - --hash=sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1 \ - --hash=sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd \ - --hash=sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07 \ - --hash=sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98 \ - --hash=sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55 \ - --hash=sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d \ - --hash=sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972 \ - --hash=sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f \ - --hash=sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e \ - --hash=sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26 \ - --hash=sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957 \ - --hash=sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53 \ - --hash=sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52 - # via -r ./requirements/requirements.in -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc - # via cffi -pycryptodome==3.20.0 \ - --hash=sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690 \ - --hash=sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7 \ - --hash=sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4 \ - --hash=sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd \ - --hash=sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5 \ - --hash=sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc \ - --hash=sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818 \ - --hash=sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab \ - --hash=sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d \ - --hash=sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a \ - --hash=sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25 \ - --hash=sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091 \ - --hash=sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea \ - --hash=sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a \ - --hash=sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c \ - --hash=sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72 \ - --hash=sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9 \ - --hash=sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6 \ - --hash=sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044 \ - 
--hash=sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04 \ - --hash=sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c \ - --hash=sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e \ - --hash=sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f \ - --hash=sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b \ - --hash=sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4 \ - --hash=sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33 \ - --hash=sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f \ - --hash=sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e \ - --hash=sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a \ - --hash=sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2 \ - --hash=sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3 \ - --hash=sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128 - # via -r ./requirements/requirements.in -pycryptodomex==3.20.0 \ - --hash=sha256:0daad007b685db36d977f9de73f61f8da2a7104e20aca3effd30752fd56f73e1 \ - --hash=sha256:108e5f1c1cd70ffce0b68739c75734437c919d2eaec8e85bffc2c8b4d2794305 \ - --hash=sha256:19764605feea0df966445d46533729b645033f134baeb3ea26ad518c9fdf212c \ - --hash=sha256:1be97461c439a6af4fe1cf8bf6ca5936d3db252737d2f379cc6b2e394e12a458 \ - --hash=sha256:25cd61e846aaab76d5791d006497134602a9e451e954833018161befc3b5b9ed \ - --hash=sha256:2a47bcc478741b71273b917232f521fd5704ab4b25d301669879e7273d3586cc \ - --hash=sha256:59af01efb011b0e8b686ba7758d59cf4a8263f9ad35911bfe3f416cee4f5c08c \ - --hash=sha256:5dcac11031a71348faaed1f403a0debd56bf5404232284cf8c761ff918886ebc \ - --hash=sha256:62a5ec91388984909bb5398ea49ee61b68ecb579123694bffa172c3b0a107079 \ - --hash=sha256:645bd4ca6f543685d643dadf6a856cc382b654cc923460e3a10a49c1b3832aeb \ - --hash=sha256:653b29b0819605fe0898829c8ad6400a6ccde096146730c2da54eede9b7b8baa \ - --hash=sha256:69138068268127cd605e03438312d8f271135a33140e2742b417d027a0539427 \ - --hash=sha256:6e186342cfcc3aafaad565cbd496060e5a614b441cacc3995ef0091115c1f6c5 \ - --hash=sha256:76bd15bb65c14900d98835fcd10f59e5e0435077431d3a394b60b15864fddd64 \ - --hash=sha256:7805830e0c56d88f4d491fa5ac640dfc894c5ec570d1ece6ed1546e9df2e98d6 \ - --hash=sha256:7a710b79baddd65b806402e14766c721aee8fb83381769c27920f26476276c1e \ - --hash=sha256:7a7a8f33a1f1fb762ede6cc9cbab8f2a9ba13b196bfaf7bc6f0b39d2ba315a43 \ - --hash=sha256:82ee7696ed8eb9a82c7037f32ba9b7c59e51dda6f105b39f043b6ef293989cb3 \ - --hash=sha256:88afd7a3af7ddddd42c2deda43d53d3dfc016c11327d0915f90ca34ebda91499 \ - --hash=sha256:8af1a451ff9e123d0d8bd5d5e60f8e3315c3a64f3cdd6bc853e26090e195cdc8 \ - --hash=sha256:8ee606964553c1a0bc74057dd8782a37d1c2bc0f01b83193b6f8bb14523b877b \ - --hash=sha256:91852d4480a4537d169c29a9d104dda44094c78f1f5b67bca76c29a91042b623 \ - --hash=sha256:9c682436c359b5ada67e882fec34689726a09c461efd75b6ea77b2403d5665b7 \ - --hash=sha256:bc3ee1b4d97081260d92ae813a83de4d2653206967c4a0a017580f8b9548ddbc \ - --hash=sha256:bca649483d5ed251d06daf25957f802e44e6bb6df2e8f218ae71968ff8f8edc4 \ - --hash=sha256:c39778fd0548d78917b61f03c1fa8bfda6cfcf98c767decf360945fe6f97461e \ - --hash=sha256:cbe71b6712429650e3883dc81286edb94c328ffcd24849accac0a4dbcc76958a \ - --hash=sha256:d00fe8596e1cc46b44bf3907354e9377aa030ec4cd04afbbf6e899fc1e2a7781 \ - --hash=sha256:d3584623e68a5064a04748fb6d76117a21a7cb5eaba20608a41c7d0c61721794 \ - 
--hash=sha256:e48217c7901edd95f9f097feaa0388da215ed14ce2ece803d3f300b4e694abea \ - --hash=sha256:f2e497413560e03421484189a6b65e33fe800d3bd75590e6d78d4dfdb7accf3b \ - --hash=sha256:ff5c9a67f8a4fba4aed887216e32cbc48f2a6fb2673bb10a99e43be463e15913 - # via - # -r ./requirements/requirements.in - # oic - # pyjwkest -pydantic==2.7.0 \ - --hash=sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352 \ - --hash=sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383 - # via pydantic-settings -pydantic-core==2.18.1 \ - --hash=sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6 \ - --hash=sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb \ - --hash=sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0 \ - --hash=sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6 \ - --hash=sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47 \ - --hash=sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a \ - --hash=sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a \ - --hash=sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac \ - --hash=sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88 \ - --hash=sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db \ - --hash=sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d \ - --hash=sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d \ - --hash=sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9 \ - --hash=sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e \ - --hash=sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b \ - --hash=sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d \ - --hash=sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649 \ - --hash=sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c \ - --hash=sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1 \ - --hash=sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09 \ - --hash=sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0 \ - --hash=sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90 \ - --hash=sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d \ - --hash=sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294 \ - --hash=sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144 \ - --hash=sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b \ - --hash=sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1 \ - --hash=sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b \ - --hash=sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2 \ - --hash=sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad \ - --hash=sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622 \ - --hash=sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17 \ - --hash=sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06 \ - --hash=sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc \ - --hash=sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50 \ - --hash=sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d \ - 
--hash=sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59 \ - --hash=sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539 \ - --hash=sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a \ - --hash=sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b \ - --hash=sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5 \ - --hash=sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9 \ - --hash=sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278 \ - --hash=sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6 \ - --hash=sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44 \ - --hash=sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0 \ - --hash=sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb \ - --hash=sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80 \ - --hash=sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5 \ - --hash=sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570 \ - --hash=sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b \ - --hash=sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de \ - --hash=sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6 \ - --hash=sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8 \ - --hash=sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203 \ - --hash=sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7 \ - --hash=sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048 \ - --hash=sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae \ - --hash=sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89 \ - --hash=sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f \ - --hash=sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926 \ - --hash=sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2 \ - --hash=sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76 \ - --hash=sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d \ - --hash=sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411 \ - --hash=sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9 \ - --hash=sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2 \ - --hash=sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586 \ - --hash=sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35 \ - --hash=sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c \ - --hash=sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143 \ - --hash=sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6 \ - --hash=sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60 \ - --hash=sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b \ - --hash=sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226 \ - --hash=sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519 \ - --hash=sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31 \ - --hash=sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7 \ - 
--hash=sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b - # via pydantic -pydantic-settings==2.2.1 \ - --hash=sha256:00b9f6a5e95553590434c0fa01ead0b216c3e10bc54ae02e37f359948643c5ed \ - --hash=sha256:0235391d26db4d2190cb9b31051c4b46882d28a51533f97440867f012d4da091 - # via oic -pydash==8.0.0 \ - --hash=sha256:5a90d98b5f370bb9620f786221579df8f83d54f1f58de6a66f52b1bdba7175d1 \ - --hash=sha256:be2c35df332473a5a939d485422c71a03b19b1a98d6559efb832eadb4a2cfc36 - # via -r ./requirements/requirements.in -pyjwkest==1.4.2 \ - --hash=sha256:5560fd5ba08655f29ff6ad1df1e15dc05abc9d976fcbcec8d2b5167f49b70222 - # via oic -pyjwt==2.8.0 \ - --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ - --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 - # via - # -r ./requirements/requirements.in - # djangorestframework-simplejwt -pypdf==4.2.0 \ - --hash=sha256:dc035581664e0ad717e3492acebc1a5fc23dba759e788e3d4a9fc9b1a32e72c1 \ - --hash=sha256:fe63f3f7d1dcda1c9374421a94c1bba6c6f8c4a62173a59b64ffd52058f846b1 - # via -r ./requirements/requirements.in -python-dateutil==2.9.0.post0 \ - --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ - --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 - # via - # botocore - # faker - # pandas -python-dotenv==1.0.1 \ - --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \ - --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a - # via - # environs - # pydantic-settings -python-json-logger==2.0.7 \ - --hash=sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c \ - --hash=sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd - # via -r ./requirements/requirements.in -python-slugify==8.0.4 \ - --hash=sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8 \ - --hash=sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856 - # via -r ./requirements/requirements.in -pytz==2024.1 \ - --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \ - --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319 - # via - # django-dbbackup - # pandas -pyyaml==6.0.1 \ - --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \ - --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ - --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \ - --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ - --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ - --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ - --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ - --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ - --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ - --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ - --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \ - --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \ - --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ - --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \ - --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ - 
--hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ - --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ - --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ - --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ - --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ - --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ - --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \ - --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ - --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ - --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ - --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \ - --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ - --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ - --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ - --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ - --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ - --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ - --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ - --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ - --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ - --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ - --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ - --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ - --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ - --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ - --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ - --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ - --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \ - --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ - --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \ - --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ - --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ - --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ - --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ - --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ - --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f - # via -r ./requirements/requirements.in -referencing==0.34.0 \ - --hash=sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844 \ - --hash=sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4 - # via - # jsonschema - # jsonschema-specifications -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 - # via - # -r ./requirements/requirements.in - # oic - # pyjwkest -rpds-py==0.18.0 \ - 
--hash=sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f \ - --hash=sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c \ - --hash=sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76 \ - --hash=sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e \ - --hash=sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157 \ - --hash=sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f \ - --hash=sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5 \ - --hash=sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05 \ - --hash=sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24 \ - --hash=sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1 \ - --hash=sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8 \ - --hash=sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b \ - --hash=sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb \ - --hash=sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07 \ - --hash=sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1 \ - --hash=sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6 \ - --hash=sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e \ - --hash=sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e \ - --hash=sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1 \ - --hash=sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab \ - --hash=sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4 \ - --hash=sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17 \ - --hash=sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594 \ - --hash=sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d \ - --hash=sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d \ - --hash=sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3 \ - --hash=sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c \ - --hash=sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66 \ - --hash=sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f \ - --hash=sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80 \ - --hash=sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33 \ - --hash=sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f \ - --hash=sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c \ - --hash=sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022 \ - --hash=sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e \ - --hash=sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f \ - --hash=sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da \ - --hash=sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1 \ - --hash=sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688 \ - --hash=sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795 \ - --hash=sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c \ - --hash=sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98 \ - 
--hash=sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1 \ - --hash=sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20 \ - --hash=sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307 \ - --hash=sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4 \ - --hash=sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18 \ - --hash=sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294 \ - --hash=sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66 \ - --hash=sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467 \ - --hash=sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948 \ - --hash=sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e \ - --hash=sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1 \ - --hash=sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0 \ - --hash=sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7 \ - --hash=sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd \ - --hash=sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641 \ - --hash=sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d \ - --hash=sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9 \ - --hash=sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1 \ - --hash=sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da \ - --hash=sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3 \ - --hash=sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa \ - --hash=sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7 \ - --hash=sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40 \ - --hash=sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496 \ - --hash=sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124 \ - --hash=sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836 \ - --hash=sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434 \ - --hash=sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984 \ - --hash=sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f \ - --hash=sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6 \ - --hash=sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e \ - --hash=sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461 \ - --hash=sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c \ - --hash=sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432 \ - --hash=sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73 \ - --hash=sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58 \ - --hash=sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88 \ - --hash=sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337 \ - --hash=sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7 \ - --hash=sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863 \ - --hash=sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475 \ - --hash=sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3 \ - 
--hash=sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51 \ - --hash=sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf \ - --hash=sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024 \ - --hash=sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40 \ - --hash=sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9 \ - --hash=sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec \ - --hash=sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb \ - --hash=sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7 \ - --hash=sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861 \ - --hash=sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880 \ - --hash=sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f \ - --hash=sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd \ - --hash=sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca \ - --hash=sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58 \ - --hash=sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e - # via - # jsonschema - # referencing -s3transfer==0.10.1 \ - --hash=sha256:5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19 \ - --hash=sha256:ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d - # via boto3 -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # fs - # furl - # orderedmultidict - # pyjwkest - # python-dateutil -sqlalchemy==2.0.29 \ - --hash=sha256:01d10638a37460616708062a40c7b55f73e4d35eaa146781c683e0fa7f6c43fb \ - --hash=sha256:04c487305ab035a9548f573763915189fc0fe0824d9ba28433196f8436f1449c \ - --hash=sha256:0dfefdb3e54cd15f5d56fd5ae32f1da2d95d78319c1f6dfb9bcd0eb15d603d5d \ - --hash=sha256:0f3ca96af060a5250a8ad5a63699180bc780c2edf8abf96c58af175921df847a \ - --hash=sha256:205f5a2b39d7c380cbc3b5dcc8f2762fb5bcb716838e2d26ccbc54330775b003 \ - --hash=sha256:25664e18bef6dc45015b08f99c63952a53a0a61f61f2e48a9e70cec27e55f699 \ - --hash=sha256:296195df68326a48385e7a96e877bc19aa210e485fa381c5246bc0234c36c78e \ - --hash=sha256:2a0732dffe32333211801b28339d2a0babc1971bc90a983e3035e7b0d6f06b93 \ - --hash=sha256:3071ad498896907a5ef756206b9dc750f8e57352113c19272bdfdc429c7bd7de \ - --hash=sha256:308ef9cb41d099099fffc9d35781638986870b29f744382904bf9c7dadd08513 \ - --hash=sha256:334184d1ab8f4c87f9652b048af3f7abea1c809dfe526fb0435348a6fef3d380 \ - --hash=sha256:38b624e5cf02a69b113c8047cf7f66b5dfe4a2ca07ff8b8716da4f1b3ae81567 \ - --hash=sha256:471fcb39c6adf37f820350c28aac4a7df9d3940c6548b624a642852e727ea586 \ - --hash=sha256:4c142852ae192e9fe5aad5c350ea6befe9db14370b34047e1f0f7cf99e63c63b \ - --hash=sha256:4f6d971255d9ddbd3189e2e79d743ff4845c07f0633adfd1de3f63d930dbe673 \ - --hash=sha256:52c8011088305476691b8750c60e03b87910a123cfd9ad48576d6414b6ec2a1d \ - --hash=sha256:52de4736404e53c5c6a91ef2698c01e52333988ebdc218f14c833237a0804f1b \ - --hash=sha256:5c7b02525ede2a164c5fa5014915ba3591730f2cc831f5be9ff3b7fd3e30958e \ - --hash=sha256:5ef3fbccb4058355053c51b82fd3501a6e13dd808c8d8cd2561e610c5456013c \ - --hash=sha256:5f20cb0a63a3e0ec4e169aa8890e32b949c8145983afa13a708bc4b0a1f30e03 \ - --hash=sha256:61405ea2d563407d316c63a7b5271ae5d274a2a9fbcd01b0aa5503635699fa1e \ - 
--hash=sha256:77d29cb6c34b14af8a484e831ab530c0f7188f8efed1c6a833a2c674bf3c26ec \ - --hash=sha256:7b184e3de58009cc0bf32e20f137f1ec75a32470f5fede06c58f6c355ed42a72 \ - --hash=sha256:7e614d7a25a43a9f54fcce4675c12761b248547f3d41b195e8010ca7297c369c \ - --hash=sha256:8197d6f7a3d2b468861ebb4c9f998b9df9e358d6e1cf9c2a01061cb9b6cf4e41 \ - --hash=sha256:87a1d53a5382cdbbf4b7619f107cc862c1b0a4feb29000922db72e5a66a5ffc0 \ - --hash=sha256:8c37f1050feb91f3d6c32f864d8e114ff5545a4a7afe56778d76a9aec62638ba \ - --hash=sha256:90453597a753322d6aa770c5935887ab1fc49cc4c4fdd436901308383d698b4b \ - --hash=sha256:988569c8732f54ad3234cf9c561364221a9e943b78dc7a4aaf35ccc2265f1930 \ - --hash=sha256:99a1e69d4e26f71e750e9ad6fdc8614fbddb67cfe2173a3628a2566034e223c7 \ - --hash=sha256:9b19836ccca0d321e237560e475fd99c3d8655d03da80c845c4da20dda31b6e1 \ - --hash=sha256:9d6753305936eddc8ed190e006b7bb33a8f50b9854823485eed3a886857ab8d1 \ - --hash=sha256:a13b917b4ffe5a0a31b83d051d60477819ddf18276852ea68037a144a506efb9 \ - --hash=sha256:a88913000da9205b13f6f195f0813b6ffd8a0c0c2bd58d499e00a30eb508870c \ - --hash=sha256:b2a0e3cf0caac2085ff172c3faacd1e00c376e6884b5bc4dd5b6b84623e29e4f \ - --hash=sha256:b5d7ed79df55a731749ce65ec20d666d82b185fa4898430b17cb90c892741520 \ - --hash=sha256:bab41acf151cd68bc2b466deae5deeb9e8ae9c50ad113444151ad965d5bf685b \ - --hash=sha256:bd9566b8e58cabd700bc367b60e90d9349cd16f0984973f98a9a09f9c64e86f0 \ - --hash=sha256:bda7ce59b06d0f09afe22c56714c65c957b1068dee3d5e74d743edec7daba552 \ - --hash=sha256:c2f9c762a2735600654c654bf48dad388b888f8ce387b095806480e6e4ff6907 \ - --hash=sha256:c4520047006b1d3f0d89e0532978c0688219857eb2fee7c48052560ae76aca1e \ - --hash=sha256:d96710d834a6fb31e21381c6d7b76ec729bd08c75a25a5184b1089141356171f \ - --hash=sha256:dba622396a3170974f81bad49aacebd243455ec3cc70615aeaef9e9613b5bca5 \ - --hash=sha256:dc4ee2d4ee43251905f88637d5281a8d52e916a021384ec10758826f5cbae305 \ - --hash=sha256:dddaae9b81c88083e6437de95c41e86823d150f4ee94bf24e158a4526cbead01 \ - --hash=sha256:de7202ffe4d4a8c1e3cde1c03e01c1a3772c92858837e8f3879b497158e4cb44 \ - --hash=sha256:e5bbe55e8552019c6463709b39634a5fc55e080d0827e2a3a11e18eb73f5cdbd \ - --hash=sha256:ea311d4ee9a8fa67f139c088ae9f905fcf0277d6cd75c310a21a88bf85e130f5 \ - --hash=sha256:fecd5089c4be1bcc37c35e9aa678938d2888845a134dd016de457b942cf5a758 - # via -r ./requirements/requirements.in -sqlparse==0.5.0 \ - --hash=sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93 \ - --hash=sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663 - # via - # -r ./requirements/requirements.in - # django -text-unidecode==1.3 \ - --hash=sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8 \ - --hash=sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93 - # via python-slugify -typing-extensions==4.11.0 \ - --hash=sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0 \ - --hash=sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a - # via - # dj-database-url - # pydantic - # pydantic-core - # pydash - # sqlalchemy -tzdata==2024.1 \ - --hash=sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd \ - --hash=sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252 - # via - # django - # pandas -uritemplate==4.1.1 \ - --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ - --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e - # via -r ./requirements/requirements.in -urllib3==2.2.2 \ - 
--hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 - # via - # -r ./requirements/requirements.in - # botocore - # requests -zope-event==5.0 \ - --hash=sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26 \ - --hash=sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd - # via gevent -zope-interface==6.3 \ - --hash=sha256:014bb94fe6bf1786da1aa044eadf65bc6437bcb81c451592987e5be91e70a91e \ - --hash=sha256:01a0b3dd012f584afcf03ed814bce0fc40ed10e47396578621509ac031be98bf \ - --hash=sha256:10cde8dc6b2fd6a1d0b5ca4be820063e46ddba417ab82bcf55afe2227337b130 \ - --hash=sha256:187f7900b63845dcdef1be320a523dbbdba94d89cae570edc2781eb55f8c2f86 \ - --hash=sha256:1b0c4c90e5eefca2c3e045d9f9ed9f1e2cdbe70eb906bff6b247e17119ad89a1 \ - --hash=sha256:22e8a218e8e2d87d4d9342aa973b7915297a08efbebea5b25900c73e78ed468e \ - --hash=sha256:26c9a37fb395a703e39b11b00b9e921c48f82b6e32cc5851ad5d0618cd8876b5 \ - --hash=sha256:2bb78c12c1ad3a20c0d981a043d133299117b6854f2e14893b156979ed4e1d2c \ - --hash=sha256:2c3cfb272bcb83650e6695d49ae0d14dd06dc694789a3d929f23758557a23d92 \ - --hash=sha256:2f32010ffb87759c6a3ad1c65ed4d2e38e51f6b430a1ca11cee901ec2b42e021 \ - --hash=sha256:3c8731596198198746f7ce2a4487a0edcbc9ea5e5918f0ab23c4859bce56055c \ - --hash=sha256:40aa8c8e964d47d713b226c5baf5f13cdf3a3169c7a2653163b17ff2e2334d10 \ - --hash=sha256:4137025731e824eee8d263b20682b28a0bdc0508de9c11d6c6be54163e5b7c83 \ - --hash=sha256:46034be614d1f75f06e7dcfefba21d609b16b38c21fc912b01a99cb29e58febb \ - --hash=sha256:483e118b1e075f1819b3c6ace082b9d7d3a6a5eb14b2b375f1b80a0868117920 \ - --hash=sha256:4d6b229f5e1a6375f206455cc0a63a8e502ed190fe7eb15e94a312dc69d40299 \ - --hash=sha256:567d54c06306f9c5b6826190628d66753b9f2b0422f4c02d7c6d2b97ebf0a24e \ - --hash=sha256:5683aa8f2639016fd2b421df44301f10820e28a9b96382a6e438e5c6427253af \ - --hash=sha256:600101f43a7582d5b9504a7c629a1185a849ce65e60fca0f6968dfc4b76b6d39 \ - --hash=sha256:62e32f02b3f26204d9c02c3539c802afc3eefb19d601a0987836ed126efb1f21 \ - --hash=sha256:69dedb790530c7ca5345899a1b4cb837cc53ba669051ea51e8c18f82f9389061 \ - --hash=sha256:72d5efecad16c619a97744a4f0b67ce1bcc88115aa82fcf1dc5be9bb403bcc0b \ - --hash=sha256:8d407e0fd8015f6d5dfad481309638e1968d70e6644e0753f229154667dd6cd5 \ - --hash=sha256:a058e6cf8d68a5a19cb5449f42a404f0d6c2778b897e6ce8fadda9cea308b1b0 \ - --hash=sha256:a1adc14a2a9d5e95f76df625a9b39f4709267a483962a572e3f3001ef90ea6e6 \ - --hash=sha256:a56fe1261230093bfeedc1c1a6cd6f3ec568f9b07f031c9a09f46b201f793a85 \ - --hash=sha256:ad4524289d8dbd6fb5aa17aedb18f5643e7d48358f42c007a5ee51a2afc2a7c5 \ - --hash=sha256:afa0491a9f154cf8519a02026dc85a416192f4cb1efbbf32db4a173ba28b289a \ - --hash=sha256:bf34840e102d1d0b2d39b1465918d90b312b1119552cebb61a242c42079817b9 \ - --hash=sha256:c40df4aea777be321b7e68facb901bc67317e94b65d9ab20fb96e0eb3c0b60a1 \ - --hash=sha256:d0e7321557c702bd92dac3c66a2f22b963155fdb4600133b6b29597f62b71b12 \ - --hash=sha256:d165d7774d558ea971cb867739fb334faf68fc4756a784e689e11efa3becd59e \ - --hash=sha256:e78a183a3c2f555c2ad6aaa1ab572d1c435ba42f1dc3a7e8c82982306a19b785 \ - --hash=sha256:e8fa0fb05083a1a4216b4b881fdefa71c5d9a106e9b094cd4399af6b52873e91 \ - --hash=sha256:f83d6b4b22262d9a826c3bd4b2fbfafe1d0000f085ef8e44cd1328eea274ae6a \ - --hash=sha256:f95bebd0afe86b2adc074df29edb6848fc4d474ff24075e2c263d698774e108d - # via gevent - -# The following packages are considered to be unsafe in a requirements file: 
-setuptools==71.0.2 \ - --hash=sha256:ca359bea0cd5c8ce267d7463239107e87f312f2e2a11b6ca6357565d82b6c0d7 \ - --hash=sha256:f6640114f96be808024fbd1f721161215543796d3a68da4524349de700604ce8 - # via - # fs - # zope-event - # zope-interface +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --generate-hashes --output-file=requirements.txt ./requirements/requirements.in +# +annotated-types==0.6.0 \ + --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ + --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d + # via pydantic +appdirs==1.4.4 \ + --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \ + --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 + # via fs +asgiref==3.8.1 \ + --hash=sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47 \ + --hash=sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590 + # via + # django + # django-cors-headers +attrs==23.2.0 \ + --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ + --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 + # via + # jsonschema + # referencing +boto3==1.34.83 \ + --hash=sha256:33cf93f6de5176f1188c923f4de1ae149ed723b89ed12e434f2b2f628491769e \ + --hash=sha256:9733ce811bd82feab506ad9309e375a79cabe8c6149061971c17754ce8997551 + # via + # -r ./requirements/requirements.in + # django-storages +botocore==1.34.83 \ + --hash=sha256:0a3fbbe018416aeefa8978454fb0b8129adbaf556647b72269bf02e4bf1f4161 \ + --hash=sha256:0f302aa76283d4df62b4fbb6d3d20115c1a8957fc02171257fc93904d69d5636 + # via + # boto3 + # s3transfer +certifi==2024.2.2 \ + --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \ + --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1 + # via requests +cfenv==0.5.3 \ + --hash=sha256:7815bffcc4a3db350f92517157fafc577c11b5a7ff172dc5632f1042b93073e8 \ + --hash=sha256:c7a91a4c82431acfc35db664c194d5e6cc7f4df3dcb692d0f836a6ceb0156167 + # via -r ./requirements/requirements.in +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + 
--hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 + # via + # cryptography + # gevent +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + 
--hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + 
--hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 + # via requests +cryptography==42.0.5 \ + 
--hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ + --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ + --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ + --hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ + --hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ + --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ + --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ + --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ + --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ + --hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ + --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ + --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ + --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ + --hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ + --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ + --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ + --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ + --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ + --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ + --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ + --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ + --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ + --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ + --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ + --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ + --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ + --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ + --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ + --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ + --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ + --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ + --hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 + # via + # -r ./requirements/requirements.in + # oic +defusedxml==0.7.1 \ + --hash=sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69 \ + --hash=sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61 + # via oic +dj-database-url==2.1.0 \ + --hash=sha256:04bc34b248d4c21aaa13e4ab419ae6575ef5f10f3df735ce7da97722caa356e0 \ + --hash=sha256:f2042cefe1086e539c9da39fad5ad7f61173bf79665e69bf7e4de55fa88b135f + # via environs +dj-email-url==1.0.6 \ + --hash=sha256:55ffe3329e48f54f8a75aa36ece08f365e09d61f8a209773ef09a1d4760e699a \ + --hash=sha256:cbd08327fbb08b104eac160fb4703f375532e4c0243eb230f5b960daee7a96db + # via environs +django==5.1 \ + --hash=sha256:848a5980e8efb76eea70872fb0e4bc5e371619c70fffbe48e3e1b50b2c09455d \ + --hash=sha256:d3b811bf5371a26def053d7ee42a9df1267ef7622323fe70a601936725aa4557 + # via + # -r ./requirements/requirements.in + # 
dj-database-url + # django-cors-headers + # django-csp + # django-dbbackup + # django-storages + # djangorestframework + # djangorestframework-simplejwt +django-cache-url==3.4.5 \ + --hash=sha256:5f350759978483ab85dc0e3e17b3d53eed3394a28148f6bf0f53d11d0feb5b3c \ + --hash=sha256:eb9fb194717524348c95cad9905b70b647452741c1d9e481fac6d2125f0ad917 + # via environs +django-cors-headers==4.3.1 \ + --hash=sha256:0b1fd19297e37417fc9f835d39e45c8c642938ddba1acce0c1753d3edef04f36 \ + --hash=sha256:0bf65ef45e606aff1994d35503e6b677c0b26cafff6506f8fd7187f3be840207 + # via -r ./requirements/requirements.in +django-csp==3.8 \ + --hash=sha256:19b2978b03fcd73517d7d67acbc04fbbcaec0facc3e83baa502965892d1e0719 \ + --hash=sha256:ef0f1a9f7d8da68ae6e169c02e9ac661c0ecf04db70e0d1d85640512a68471c0 + # via -r ./requirements/requirements.in +django-dbbackup==4.1.0 \ + --hash=sha256:c411d38d0f8e60ab3254017278c14ebd75d4001b5634fc73be7fbe8a5260583b \ + --hash=sha256:c539b5246b429a22a8efadbab3719ee6b8eda45c66c4ff6592056c590d51c782 + # via -r ./requirements/requirements.in +django-fsm==3.0.0 \ + --hash=sha256:0112bcac573ad14051cf8ebe73bf296b6d5409f093e5f1677eb16e2196e263b3 \ + --hash=sha256:fa28f84f47eae7ce9247585ac6c1895e4ada08efff93fb243a59e9ff77b2d4ec + # via -r ./requirements/requirements.in +django-storages[boto3]==1.14.2 \ + --hash=sha256:1db759346b52ada6c2efd9f23d8241ecf518813eb31db9e2589207174f58f6ad \ + --hash=sha256:51b36af28cc5813b98d5f3dfe7459af638d84428c8df4a03990c7d74d1bea4e5 + # via -r ./requirements/requirements.in +djangorestframework==3.15.2 \ + --hash=sha256:2b8871b062ba1aefc2de01f773875441a961fefbf79f5eed1e32b2f096944b20 \ + --hash=sha256:36fe88cd2d6c6bec23dca9804bab2ba5517a8bb9d8f47ebc68981b56840107ad + # via + # -r ./requirements/requirements.in + # djangorestframework-simplejwt +djangorestframework-simplejwt==5.3.1 \ + --hash=sha256:381bc966aa46913905629d472cd72ad45faa265509764e20ffd440164c88d220 \ + --hash=sha256:6c4bd37537440bc439564ebf7d6085e74c5411485197073f508ebdfa34bc9fae + # via -r ./requirements/requirements.in +environs[django]==11.0.0 \ + --hash=sha256:069727a8f73d8ba8d033d3cd95c0da231d44f38f1da773bf076cef168d312ee8 \ + --hash=sha256:e0bcfd41c718c07a7db422f9109e490746450da38793fe4ee197f397b9343435 + # via -r ./requirements/requirements.in +et-xmlfile==1.1.0 \ + --hash=sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c \ + --hash=sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada + # via openpyxl +faker==24.9.0 \ + --hash=sha256:73b1e7967b0ceeac42fc99a8c973bb49e4499cc4044d20d17ab661d5cb7eda1d \ + --hash=sha256:97c7874665e8eb7b517f97bf3b59f03bf3f07513fe2c159e98b6b9ea6b9f2b3d + # via -r ./requirements/requirements.in +fs==2.4.16 \ + --hash=sha256:660064febbccda264ae0b6bace80a8d1be9e089e0a5eb2427b7d517f9a91545c \ + --hash=sha256:ae97c7d51213f4b70b6a958292530289090de3a7e15841e108fbe144f069d313 + # via -r ./requirements/requirements.in +furl==2.1.3 \ + --hash=sha256:5a6188fe2666c484a12159c18be97a1977a71d632ef5bb867ef15f54af39cc4e \ + --hash=sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0 + # via cfenv +future==1.0.0 \ + --hash=sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216 \ + --hash=sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05 + # via pyjwkest +gevent==24.2.1 \ + --hash=sha256:03aa5879acd6b7076f6a2a307410fb1e0d288b84b03cdfd8c74db8b4bc882fc5 \ + --hash=sha256:117e5837bc74a1673605fb53f8bfe22feb6e5afa411f524c835b2ddf768db0de \ + 
--hash=sha256:141a2b24ad14f7b9576965c0c84927fc85f824a9bb19f6ec1e61e845d87c9cd8 \ + --hash=sha256:14532a67f7cb29fb055a0e9b39f16b88ed22c66b96641df8c04bdc38c26b9ea5 \ + --hash=sha256:1dffb395e500613e0452b9503153f8f7ba587c67dd4a85fc7cd7aa7430cb02cc \ + --hash=sha256:2955eea9c44c842c626feebf4459c42ce168685aa99594e049d03bedf53c2800 \ + --hash=sha256:2ae3a25ecce0a5b0cd0808ab716bfca180230112bb4bc89b46ae0061d62d4afe \ + --hash=sha256:2e9ac06f225b696cdedbb22f9e805e2dd87bf82e8fa5e17756f94e88a9d37cf7 \ + --hash=sha256:368a277bd9278ddb0fde308e6a43f544222d76ed0c4166e0d9f6b036586819d9 \ + --hash=sha256:3adfb96637f44010be8abd1b5e73b5070f851b817a0b182e601202f20fa06533 \ + --hash=sha256:3d5325ccfadfd3dcf72ff88a92fb8fc0b56cacc7225f0f4b6dcf186c1a6eeabc \ + --hash=sha256:432fc76f680acf7cf188c2ee0f5d3ab73b63c1f03114c7cd8a34cebbe5aa2056 \ + --hash=sha256:44098038d5e2749b0784aabb27f1fcbb3f43edebedf64d0af0d26955611be8d6 \ + --hash=sha256:5a1df555431f5cd5cc189a6ee3544d24f8c52f2529134685f1e878c4972ab026 \ + --hash=sha256:6c47ae7d1174617b3509f5d884935e788f325eb8f1a7efc95d295c68d83cce40 \ + --hash=sha256:6f947a9abc1a129858391b3d9334c45041c08a0f23d14333d5b844b6e5c17a07 \ + --hash=sha256:782a771424fe74bc7e75c228a1da671578c2ba4ddb2ca09b8f959abdf787331e \ + --hash=sha256:7899a38d0ae7e817e99adb217f586d0a4620e315e4de577444ebeeed2c5729be \ + --hash=sha256:7b00f8c9065de3ad226f7979154a7b27f3b9151c8055c162332369262fc025d8 \ + --hash=sha256:8f4b8e777d39013595a7740b4463e61b1cfe5f462f1b609b28fbc1e4c4ff01e5 \ + --hash=sha256:90cbac1ec05b305a1b90ede61ef73126afdeb5a804ae04480d6da12c56378df1 \ + --hash=sha256:918cdf8751b24986f915d743225ad6b702f83e1106e08a63b736e3a4c6ead789 \ + --hash=sha256:9202f22ef811053077d01f43cc02b4aaf4472792f9fd0f5081b0b05c926cca19 \ + --hash=sha256:94138682e68ec197db42ad7442d3cf9b328069c3ad8e4e5022e6b5cd3e7ffae5 \ + --hash=sha256:968581d1717bbcf170758580f5f97a2925854943c45a19be4d47299507db2eb7 \ + --hash=sha256:9d8d0642c63d453179058abc4143e30718b19a85cbf58c2744c9a63f06a1d388 \ + --hash=sha256:a7ceb59986456ce851160867ce4929edaffbd2f069ae25717150199f8e1548b8 \ + --hash=sha256:b9913c45d1be52d7a5db0c63977eebb51f68a2d5e6fd922d1d9b5e5fd758cc98 \ + --hash=sha256:bde283313daf0b34a8d1bab30325f5cb0f4e11b5869dbe5bc61f8fe09a8f66f3 \ + --hash=sha256:bf5b9c72b884c6f0c4ed26ef204ee1f768b9437330422492c319470954bc4cc7 \ + --hash=sha256:ca80b121bbec76d7794fcb45e65a7eca660a76cc1a104ed439cdbd7df5f0b060 \ + --hash=sha256:cdf66977a976d6a3cfb006afdf825d1482f84f7b81179db33941f2fc9673bb1d \ + --hash=sha256:d4faf846ed132fd7ebfbbf4fde588a62d21faa0faa06e6f468b7faa6f436b661 \ + --hash=sha256:d7f87c2c02e03d99b95cfa6f7a776409083a9e4d468912e18c7680437b29222c \ + --hash=sha256:dd23df885318391856415e20acfd51a985cba6919f0be78ed89f5db9ff3a31cb \ + --hash=sha256:f5de3c676e57177b38857f6e3cdfbe8f38d1cd754b63200c0615eaa31f514b4f \ + --hash=sha256:f5e8e8d60e18d5f7fd49983f0c4696deeddaf6e608fbab33397671e2fcc6cc91 \ + --hash=sha256:f7cac622e11b4253ac4536a654fe221249065d9a69feb6cdcd4d9af3503602e0 \ + --hash=sha256:f8a04cf0c5b7139bc6368b461257d4a757ea2fe89b3773e494d235b7dd51119f \ + --hash=sha256:f8bb35ce57a63c9a6896c71a285818a3922d8ca05d150fd1fe49a7f57287b836 \ + --hash=sha256:fbfdce91239fe306772faab57597186710d5699213f4df099d1612da7320d682 + # via gunicorn +greenlet==3.0.3 \ + --hash=sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67 \ + --hash=sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6 \ + --hash=sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257 \ + 
--hash=sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4 \ + --hash=sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676 \ + --hash=sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61 \ + --hash=sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc \ + --hash=sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca \ + --hash=sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7 \ + --hash=sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728 \ + --hash=sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305 \ + --hash=sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6 \ + --hash=sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379 \ + --hash=sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414 \ + --hash=sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04 \ + --hash=sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a \ + --hash=sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf \ + --hash=sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491 \ + --hash=sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559 \ + --hash=sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e \ + --hash=sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274 \ + --hash=sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb \ + --hash=sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b \ + --hash=sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9 \ + --hash=sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b \ + --hash=sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be \ + --hash=sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506 \ + --hash=sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405 \ + --hash=sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113 \ + --hash=sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f \ + --hash=sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5 \ + --hash=sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230 \ + --hash=sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d \ + --hash=sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f \ + --hash=sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a \ + --hash=sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e \ + --hash=sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61 \ + --hash=sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6 \ + --hash=sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d \ + --hash=sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71 \ + --hash=sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22 \ + --hash=sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2 \ + --hash=sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3 \ + --hash=sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067 \ + --hash=sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc \ + 
--hash=sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881 \ + --hash=sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3 \ + --hash=sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e \ + --hash=sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac \ + --hash=sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53 \ + --hash=sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0 \ + --hash=sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b \ + --hash=sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83 \ + --hash=sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41 \ + --hash=sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c \ + --hash=sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf \ + --hash=sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da \ + --hash=sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33 + # via + # -r ./requirements/requirements.in + # gevent + # sqlalchemy +gunicorn[gevent]==22.0.0 \ + --hash=sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9 \ + --hash=sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63 + # via -r ./requirements/requirements.in +idna==3.7 \ + --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ + --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 + # via requests +jmespath==1.0.1 \ + --hash=sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980 \ + --hash=sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe + # via + # boto3 + # botocore +jsonpath-ng==1.6.1 \ + --hash=sha256:086c37ba4917304850bd837aeab806670224d3f038fe2833ff593a672ef0a5fa \ + --hash=sha256:8f22cd8273d7772eea9aaa84d922e0841aa36fdb8a2c6b7f6c3791a16a9bc0be + # via -r ./requirements/requirements.in +jsonschema==4.21.1 \ + --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \ + --hash=sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5 + # via -r ./requirements/requirements.in +jsonschema-specifications==2023.12.1 \ + --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ + --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c + # via jsonschema +mako==1.3.3 \ + --hash=sha256:5324b88089a8978bf76d1629774fcc2f1c07b82acdf00f4c5dd8ceadfffc4b40 \ + --hash=sha256:e16c01d9ab9c11f7290eef1cfefc093fb5a45ee4a3da09e2fec2e4d1bae54e73 + # via oic +markupsafe==2.1.5 \ + --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ + --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ + --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ + --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ + --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ + --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ + --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ + --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ + --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ + --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ + 
--hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ + --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ + --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ + --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ + --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ + --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ + --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ + --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ + --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ + --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ + --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ + --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ + --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ + --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ + --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ + --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ + --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ + --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ + --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ + --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ + --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ + --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ + --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ + --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ + --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ + --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ + --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ + --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ + --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ + --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ + --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ + --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ + --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ + --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ + --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ + --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ + --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ + --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ + --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ + --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ + --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ + --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ + 
--hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ + --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ + --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ + --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ + --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ + --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ + --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ + --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 + # via mako +marshmallow==3.21.1 \ + --hash=sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3 \ + --hash=sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633 + # via environs +newrelic==9.11.0 \ + --hash=sha256:02eab15af4a08b870bcfdbc56390ecbb9dcacd144fe77f39a26d1be207bd30f0 \ + --hash=sha256:0d43a0891bf71333f6a6253cf87dea2c9009e22699a2acfd93608125a33b1936 \ + --hash=sha256:10cb7f7a78c49580602b90f367f3378264e495f2f3706734f88ced7e7ca9b033 \ + --hash=sha256:11653fd14f55999c5058b4dde8c721833076c0bd3efe668296725a622e9e7de8 \ + --hash=sha256:1f6e1bb0df8ff2b54195baac41fddc0e15ea1bdf1deb6af49153487696355181 \ + --hash=sha256:2010ed2793294a7e3c1057ec301d48997ed05dcef114d4c25120ac771f66bac1 \ + --hash=sha256:21e7b52d5b214bba3534ced166e6ec991117772815020bec38b0571fdcecbaf4 \ + --hash=sha256:2b02139458aefba86a4572cb8214f91a942103d24d5502395f64d6d7a4ad3f25 \ + --hash=sha256:3283885bcf31d9cbf8facb0004508a4eaa652a62471e0b724d26f9738a291979 \ + --hash=sha256:34b25d1beaf19825409f3d915a5bafa87b7b9230415821422be1e78e988750b7 \ + --hash=sha256:35d08587e694f5c517e55fb7119f924c64569d2e7ec4968ef761fc1f7bd1f40c \ + --hash=sha256:553674a66ef2c2206852b415b74e3c2fb7ed2b92e9800b68394d577f6aa1133e \ + --hash=sha256:5f477cdda9b998205084b822089b3ee4a8a2d9cd66b6f12487c9f9002566c5cb \ + --hash=sha256:6ceac1d8f13da38fa1b41c8202a91d3b4345e06adb655deaae0df08911fda56f \ + --hash=sha256:72dd3eb190c62bb54aa59029f0d6ac1420c2050b3aaf88d947fc7f62ec58d97f \ + --hash=sha256:76eb4cc599645a38a459b0002696d9c84844fecb02cf07bc18a4a91f737e438e \ + --hash=sha256:7c073f4c26539d6d74fbf4bac7f5046cac578975fb2cf77b156f802f1b39835e \ + --hash=sha256:7f1e473eb0505cb91ab9a4155321eabe13a2f6b93fb3c41d6f10e5486276be60 \ + --hash=sha256:8664e3b9e6ee0f78806b0cf7c90656a1a86d13232c2e0be18a1b1eb452f3f5d1 \ + --hash=sha256:87670d872c3abc36203e10f93d266c8f36ad2bd06fb54e790001a409f9e2f40f \ + --hash=sha256:94369792d61ccf21469c35cf66886c32350a180d8e782c0d28ec66411db29474 \ + --hash=sha256:9f95eb366ff714bce32476d256551b853247a72398ec46a89148ef5108509aa8 \ + --hash=sha256:b33539345c7cf349b65a176a30ab38e2998b071512a7450f5c5b89ac6c097006 \ + --hash=sha256:b5d2d0814e1aa9de5bd55797ff8c426d98200ba46ca14dbca15557d0f17cfb4e \ + --hash=sha256:b7903ba71ce5a4b2840f6d3c63ecd0fb3a018d2aceb915b48133c13c4a60185f \ + --hash=sha256:bc5c1b8a51946f64c34fc5fa29ce0221c4927a65c7f4435b3b8adeb29b9812d2 \ + --hash=sha256:d88fa17a515fb002eb14570800e4bfa69ac87ac27e6e2a96bc2bc9b60c80057a \ + --hash=sha256:dcec4173cd0f83420e6f61f92955065f1d460075af5e5bf88a5fea746e3cc180 \ + --hash=sha256:ffc0d8d490de0f12df70db637481aaadb8a43fb6d71ba8866dc14242aa5edad4 + # via -r ./requirements/requirements.in +numpy==1.26.4 \ + --hash=sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b \ + --hash=sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818 \ + 
--hash=sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20 \ + --hash=sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0 \ + --hash=sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010 \ + --hash=sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a \ + --hash=sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea \ + --hash=sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c \ + --hash=sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71 \ + --hash=sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110 \ + --hash=sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be \ + --hash=sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a \ + --hash=sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a \ + --hash=sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5 \ + --hash=sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed \ + --hash=sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd \ + --hash=sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c \ + --hash=sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e \ + --hash=sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0 \ + --hash=sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c \ + --hash=sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a \ + --hash=sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b \ + --hash=sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0 \ + --hash=sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6 \ + --hash=sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2 \ + --hash=sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a \ + --hash=sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30 \ + --hash=sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218 \ + --hash=sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5 \ + --hash=sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07 \ + --hash=sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2 \ + --hash=sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4 \ + --hash=sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764 \ + --hash=sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef \ + --hash=sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3 \ + --hash=sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f + # via pandas +oic==1.6.1 \ + --hash=sha256:385a1f64bb59519df1e23840530921bf416740240f505ea6d161e331d3d39fad \ + --hash=sha256:fcbf948a22e4d4df66f6bf57d327933f32a7b539640d9b42883457634360ba78 + # via -r ./requirements/requirements.in +openpyxl==3.1.2 \ + --hash=sha256:a6f5977418eff3b2d5500d54d9db50c8277a368436f4e4f8ddb1be3422870184 \ + --hash=sha256:f91456ead12ab3c6c2e9491cf33ba6d08357d802192379bb482f1033ade496f5 + # via -r ./requirements/requirements.in +orderedmultidict==1.0.1 \ + --hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \ + --hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3 + # via furl +packaging==24.0 \ + 
--hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ + --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 + # via + # gunicorn + # marshmallow +pandas==2.2.2 \ + --hash=sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863 \ + --hash=sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2 \ + --hash=sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1 \ + --hash=sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad \ + --hash=sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db \ + --hash=sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76 \ + --hash=sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51 \ + --hash=sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32 \ + --hash=sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08 \ + --hash=sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b \ + --hash=sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4 \ + --hash=sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921 \ + --hash=sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288 \ + --hash=sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee \ + --hash=sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0 \ + --hash=sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24 \ + --hash=sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99 \ + --hash=sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151 \ + --hash=sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce \ + --hash=sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57 \ + --hash=sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef \ + --hash=sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54 \ + --hash=sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a \ + --hash=sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23 \ + --hash=sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772 \ + --hash=sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce \ + --hash=sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad + # via -r ./requirements/requirements.in +peewee==3.17.1 \ + --hash=sha256:e009ac4227c4fdc0058a56e822ad5987684f0a1fbb20fed577200785102581c3 + # via -r ./requirements/requirements.in +ply==3.11 \ + --hash=sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3 \ + --hash=sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce + # via jsonpath-ng +psycogreen==1.0.2 \ + --hash=sha256:c429845a8a49cf2f76b71265008760bcd7c7c77d80b806db4dc81116dbcd130d + # via -r ./requirements/requirements.in +psycopg2-binary==2.9.9 \ + --hash=sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9 \ + --hash=sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77 \ + --hash=sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e \ + --hash=sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84 \ + --hash=sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3 \ + --hash=sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2 \ + 
--hash=sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67 \ + --hash=sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876 \ + --hash=sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152 \ + --hash=sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f \ + --hash=sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a \ + --hash=sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6 \ + --hash=sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503 \ + --hash=sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f \ + --hash=sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493 \ + --hash=sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996 \ + --hash=sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f \ + --hash=sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e \ + --hash=sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59 \ + --hash=sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94 \ + --hash=sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7 \ + --hash=sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682 \ + --hash=sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420 \ + --hash=sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae \ + --hash=sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291 \ + --hash=sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe \ + --hash=sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980 \ + --hash=sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93 \ + --hash=sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692 \ + --hash=sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119 \ + --hash=sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716 \ + --hash=sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472 \ + --hash=sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b \ + --hash=sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2 \ + --hash=sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc \ + --hash=sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c \ + --hash=sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5 \ + --hash=sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab \ + --hash=sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984 \ + --hash=sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9 \ + --hash=sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf \ + --hash=sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0 \ + --hash=sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f \ + --hash=sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212 \ + --hash=sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb \ + --hash=sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be \ + --hash=sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90 \ + --hash=sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041 \ + 
--hash=sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7 \ + --hash=sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860 \ + --hash=sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d \ + --hash=sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245 \ + --hash=sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27 \ + --hash=sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417 \ + --hash=sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359 \ + --hash=sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202 \ + --hash=sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0 \ + --hash=sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7 \ + --hash=sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba \ + --hash=sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1 \ + --hash=sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd \ + --hash=sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07 \ + --hash=sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98 \ + --hash=sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55 \ + --hash=sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d \ + --hash=sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972 \ + --hash=sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f \ + --hash=sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e \ + --hash=sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26 \ + --hash=sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957 \ + --hash=sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53 \ + --hash=sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52 + # via -r ./requirements/requirements.in +pycparser==2.22 \ + --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ + --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc + # via cffi +pycryptodome==3.20.0 \ + --hash=sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690 \ + --hash=sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7 \ + --hash=sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4 \ + --hash=sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd \ + --hash=sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5 \ + --hash=sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc \ + --hash=sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818 \ + --hash=sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab \ + --hash=sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d \ + --hash=sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a \ + --hash=sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25 \ + --hash=sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091 \ + --hash=sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea \ + --hash=sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a \ + --hash=sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c \ + 
--hash=sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72 \ + --hash=sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9 \ + --hash=sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6 \ + --hash=sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044 \ + --hash=sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04 \ + --hash=sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c \ + --hash=sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e \ + --hash=sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f \ + --hash=sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b \ + --hash=sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4 \ + --hash=sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33 \ + --hash=sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f \ + --hash=sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e \ + --hash=sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a \ + --hash=sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2 \ + --hash=sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3 \ + --hash=sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128 + # via -r ./requirements/requirements.in +pycryptodomex==3.20.0 \ + --hash=sha256:0daad007b685db36d977f9de73f61f8da2a7104e20aca3effd30752fd56f73e1 \ + --hash=sha256:108e5f1c1cd70ffce0b68739c75734437c919d2eaec8e85bffc2c8b4d2794305 \ + --hash=sha256:19764605feea0df966445d46533729b645033f134baeb3ea26ad518c9fdf212c \ + --hash=sha256:1be97461c439a6af4fe1cf8bf6ca5936d3db252737d2f379cc6b2e394e12a458 \ + --hash=sha256:25cd61e846aaab76d5791d006497134602a9e451e954833018161befc3b5b9ed \ + --hash=sha256:2a47bcc478741b71273b917232f521fd5704ab4b25d301669879e7273d3586cc \ + --hash=sha256:59af01efb011b0e8b686ba7758d59cf4a8263f9ad35911bfe3f416cee4f5c08c \ + --hash=sha256:5dcac11031a71348faaed1f403a0debd56bf5404232284cf8c761ff918886ebc \ + --hash=sha256:62a5ec91388984909bb5398ea49ee61b68ecb579123694bffa172c3b0a107079 \ + --hash=sha256:645bd4ca6f543685d643dadf6a856cc382b654cc923460e3a10a49c1b3832aeb \ + --hash=sha256:653b29b0819605fe0898829c8ad6400a6ccde096146730c2da54eede9b7b8baa \ + --hash=sha256:69138068268127cd605e03438312d8f271135a33140e2742b417d027a0539427 \ + --hash=sha256:6e186342cfcc3aafaad565cbd496060e5a614b441cacc3995ef0091115c1f6c5 \ + --hash=sha256:76bd15bb65c14900d98835fcd10f59e5e0435077431d3a394b60b15864fddd64 \ + --hash=sha256:7805830e0c56d88f4d491fa5ac640dfc894c5ec570d1ece6ed1546e9df2e98d6 \ + --hash=sha256:7a710b79baddd65b806402e14766c721aee8fb83381769c27920f26476276c1e \ + --hash=sha256:7a7a8f33a1f1fb762ede6cc9cbab8f2a9ba13b196bfaf7bc6f0b39d2ba315a43 \ + --hash=sha256:82ee7696ed8eb9a82c7037f32ba9b7c59e51dda6f105b39f043b6ef293989cb3 \ + --hash=sha256:88afd7a3af7ddddd42c2deda43d53d3dfc016c11327d0915f90ca34ebda91499 \ + --hash=sha256:8af1a451ff9e123d0d8bd5d5e60f8e3315c3a64f3cdd6bc853e26090e195cdc8 \ + --hash=sha256:8ee606964553c1a0bc74057dd8782a37d1c2bc0f01b83193b6f8bb14523b877b \ + --hash=sha256:91852d4480a4537d169c29a9d104dda44094c78f1f5b67bca76c29a91042b623 \ + --hash=sha256:9c682436c359b5ada67e882fec34689726a09c461efd75b6ea77b2403d5665b7 \ + --hash=sha256:bc3ee1b4d97081260d92ae813a83de4d2653206967c4a0a017580f8b9548ddbc \ + --hash=sha256:bca649483d5ed251d06daf25957f802e44e6bb6df2e8f218ae71968ff8f8edc4 \ + 
--hash=sha256:c39778fd0548d78917b61f03c1fa8bfda6cfcf98c767decf360945fe6f97461e \ + --hash=sha256:cbe71b6712429650e3883dc81286edb94c328ffcd24849accac0a4dbcc76958a \ + --hash=sha256:d00fe8596e1cc46b44bf3907354e9377aa030ec4cd04afbbf6e899fc1e2a7781 \ + --hash=sha256:d3584623e68a5064a04748fb6d76117a21a7cb5eaba20608a41c7d0c61721794 \ + --hash=sha256:e48217c7901edd95f9f097feaa0388da215ed14ce2ece803d3f300b4e694abea \ + --hash=sha256:f2e497413560e03421484189a6b65e33fe800d3bd75590e6d78d4dfdb7accf3b \ + --hash=sha256:ff5c9a67f8a4fba4aed887216e32cbc48f2a6fb2673bb10a99e43be463e15913 + # via + # -r ./requirements/requirements.in + # oic + # pyjwkest +pydantic==2.7.0 \ + --hash=sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352 \ + --hash=sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383 + # via pydantic-settings +pydantic-core==2.18.1 \ + --hash=sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6 \ + --hash=sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb \ + --hash=sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0 \ + --hash=sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6 \ + --hash=sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47 \ + --hash=sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a \ + --hash=sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a \ + --hash=sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac \ + --hash=sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88 \ + --hash=sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db \ + --hash=sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d \ + --hash=sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d \ + --hash=sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9 \ + --hash=sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e \ + --hash=sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b \ + --hash=sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d \ + --hash=sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649 \ + --hash=sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c \ + --hash=sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1 \ + --hash=sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09 \ + --hash=sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0 \ + --hash=sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90 \ + --hash=sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d \ + --hash=sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294 \ + --hash=sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144 \ + --hash=sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b \ + --hash=sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1 \ + --hash=sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b \ + --hash=sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2 \ + --hash=sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad \ + --hash=sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622 \ + --hash=sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17 \ + 
--hash=sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06 \ + --hash=sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc \ + --hash=sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50 \ + --hash=sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d \ + --hash=sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59 \ + --hash=sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539 \ + --hash=sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a \ + --hash=sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b \ + --hash=sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5 \ + --hash=sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9 \ + --hash=sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278 \ + --hash=sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6 \ + --hash=sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44 \ + --hash=sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0 \ + --hash=sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb \ + --hash=sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80 \ + --hash=sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5 \ + --hash=sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570 \ + --hash=sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b \ + --hash=sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de \ + --hash=sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6 \ + --hash=sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8 \ + --hash=sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203 \ + --hash=sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7 \ + --hash=sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048 \ + --hash=sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae \ + --hash=sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89 \ + --hash=sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f \ + --hash=sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926 \ + --hash=sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2 \ + --hash=sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76 \ + --hash=sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d \ + --hash=sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411 \ + --hash=sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9 \ + --hash=sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2 \ + --hash=sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586 \ + --hash=sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35 \ + --hash=sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c \ + --hash=sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143 \ + --hash=sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6 \ + --hash=sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60 \ + --hash=sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b \ + 
--hash=sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226 \ + --hash=sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519 \ + --hash=sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31 \ + --hash=sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7 \ + --hash=sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b + # via pydantic +pydantic-settings==2.2.1 \ + --hash=sha256:00b9f6a5e95553590434c0fa01ead0b216c3e10bc54ae02e37f359948643c5ed \ + --hash=sha256:0235391d26db4d2190cb9b31051c4b46882d28a51533f97440867f012d4da091 + # via oic +pydash==8.0.0 \ + --hash=sha256:5a90d98b5f370bb9620f786221579df8f83d54f1f58de6a66f52b1bdba7175d1 \ + --hash=sha256:be2c35df332473a5a939d485422c71a03b19b1a98d6559efb832eadb4a2cfc36 + # via -r ./requirements/requirements.in +pyjwkest==1.4.2 \ + --hash=sha256:5560fd5ba08655f29ff6ad1df1e15dc05abc9d976fcbcec8d2b5167f49b70222 + # via oic +pyjwt==2.8.0 \ + --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ + --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 + # via + # -r ./requirements/requirements.in + # djangorestframework-simplejwt +pypdf==4.2.0 \ + --hash=sha256:dc035581664e0ad717e3492acebc1a5fc23dba759e788e3d4a9fc9b1a32e72c1 \ + --hash=sha256:fe63f3f7d1dcda1c9374421a94c1bba6c6f8c4a62173a59b64ffd52058f846b1 + # via -r ./requirements/requirements.in +python-dateutil==2.9.0.post0 \ + --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ + --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 + # via + # botocore + # faker + # pandas +python-dotenv==1.0.1 \ + --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \ + --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a + # via + # environs + # pydantic-settings +python-json-logger==2.0.7 \ + --hash=sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c \ + --hash=sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd + # via -r ./requirements/requirements.in +python-slugify==8.0.4 \ + --hash=sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8 \ + --hash=sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856 + # via -r ./requirements/requirements.in +pytz==2024.1 \ + --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \ + --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319 + # via + # django-dbbackup + # pandas +pyyaml==6.0.1 \ + --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \ + --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ + --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \ + --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ + --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ + --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ + --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ + --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ + --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ + --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ + --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \ + 
--hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \ + --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ + --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \ + --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ + --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ + --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ + --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ + --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ + --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ + --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ + --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \ + --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ + --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ + --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ + --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \ + --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ + --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ + --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ + --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ + --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ + --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ + --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ + --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ + --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ + --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ + --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ + --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ + --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ + --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ + --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ + --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \ + --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ + --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \ + --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ + --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ + --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ + --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ + --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ + --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f + # via -r ./requirements/requirements.in +referencing==0.34.0 \ + --hash=sha256:5773bd84ef41799a5a8ca72dc34590c041eb01bf9aa02632b4a973fb0181a844 \ + --hash=sha256:d53ae300ceddd3169f1ffa9caf2cb7b769e92657e4fafb23d34b93679116dfd4 + # via + 
# jsonschema + # jsonschema-specifications +requests==2.32.3 \ + --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ + --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 + # via + # -r ./requirements/requirements.in + # oic + # pyjwkest +rpds-py==0.18.0 \ + --hash=sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f \ + --hash=sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c \ + --hash=sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76 \ + --hash=sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e \ + --hash=sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157 \ + --hash=sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f \ + --hash=sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5 \ + --hash=sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05 \ + --hash=sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24 \ + --hash=sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1 \ + --hash=sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8 \ + --hash=sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b \ + --hash=sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb \ + --hash=sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07 \ + --hash=sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1 \ + --hash=sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6 \ + --hash=sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e \ + --hash=sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e \ + --hash=sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1 \ + --hash=sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab \ + --hash=sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4 \ + --hash=sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17 \ + --hash=sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594 \ + --hash=sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d \ + --hash=sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d \ + --hash=sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3 \ + --hash=sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c \ + --hash=sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66 \ + --hash=sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f \ + --hash=sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80 \ + --hash=sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33 \ + --hash=sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f \ + --hash=sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c \ + --hash=sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022 \ + --hash=sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e \ + --hash=sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f \ + --hash=sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da \ + --hash=sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1 \ + --hash=sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688 \ + 
--hash=sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795 \ + --hash=sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c \ + --hash=sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98 \ + --hash=sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1 \ + --hash=sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20 \ + --hash=sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307 \ + --hash=sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4 \ + --hash=sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18 \ + --hash=sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294 \ + --hash=sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66 \ + --hash=sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467 \ + --hash=sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948 \ + --hash=sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e \ + --hash=sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1 \ + --hash=sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0 \ + --hash=sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7 \ + --hash=sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd \ + --hash=sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641 \ + --hash=sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d \ + --hash=sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9 \ + --hash=sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1 \ + --hash=sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da \ + --hash=sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3 \ + --hash=sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa \ + --hash=sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7 \ + --hash=sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40 \ + --hash=sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496 \ + --hash=sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124 \ + --hash=sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836 \ + --hash=sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434 \ + --hash=sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984 \ + --hash=sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f \ + --hash=sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6 \ + --hash=sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e \ + --hash=sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461 \ + --hash=sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c \ + --hash=sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432 \ + --hash=sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73 \ + --hash=sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58 \ + --hash=sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88 \ + --hash=sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337 \ + --hash=sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7 \ + 
--hash=sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863 \ + --hash=sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475 \ + --hash=sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3 \ + --hash=sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51 \ + --hash=sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf \ + --hash=sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024 \ + --hash=sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40 \ + --hash=sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9 \ + --hash=sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec \ + --hash=sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb \ + --hash=sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7 \ + --hash=sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861 \ + --hash=sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880 \ + --hash=sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f \ + --hash=sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd \ + --hash=sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca \ + --hash=sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58 \ + --hash=sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e + # via + # jsonschema + # referencing +s3transfer==0.10.1 \ + --hash=sha256:5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19 \ + --hash=sha256:ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d + # via boto3 +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # fs + # furl + # orderedmultidict + # pyjwkest + # python-dateutil +sqlalchemy==2.0.29 \ + --hash=sha256:01d10638a37460616708062a40c7b55f73e4d35eaa146781c683e0fa7f6c43fb \ + --hash=sha256:04c487305ab035a9548f573763915189fc0fe0824d9ba28433196f8436f1449c \ + --hash=sha256:0dfefdb3e54cd15f5d56fd5ae32f1da2d95d78319c1f6dfb9bcd0eb15d603d5d \ + --hash=sha256:0f3ca96af060a5250a8ad5a63699180bc780c2edf8abf96c58af175921df847a \ + --hash=sha256:205f5a2b39d7c380cbc3b5dcc8f2762fb5bcb716838e2d26ccbc54330775b003 \ + --hash=sha256:25664e18bef6dc45015b08f99c63952a53a0a61f61f2e48a9e70cec27e55f699 \ + --hash=sha256:296195df68326a48385e7a96e877bc19aa210e485fa381c5246bc0234c36c78e \ + --hash=sha256:2a0732dffe32333211801b28339d2a0babc1971bc90a983e3035e7b0d6f06b93 \ + --hash=sha256:3071ad498896907a5ef756206b9dc750f8e57352113c19272bdfdc429c7bd7de \ + --hash=sha256:308ef9cb41d099099fffc9d35781638986870b29f744382904bf9c7dadd08513 \ + --hash=sha256:334184d1ab8f4c87f9652b048af3f7abea1c809dfe526fb0435348a6fef3d380 \ + --hash=sha256:38b624e5cf02a69b113c8047cf7f66b5dfe4a2ca07ff8b8716da4f1b3ae81567 \ + --hash=sha256:471fcb39c6adf37f820350c28aac4a7df9d3940c6548b624a642852e727ea586 \ + --hash=sha256:4c142852ae192e9fe5aad5c350ea6befe9db14370b34047e1f0f7cf99e63c63b \ + --hash=sha256:4f6d971255d9ddbd3189e2e79d743ff4845c07f0633adfd1de3f63d930dbe673 \ + --hash=sha256:52c8011088305476691b8750c60e03b87910a123cfd9ad48576d6414b6ec2a1d \ + --hash=sha256:52de4736404e53c5c6a91ef2698c01e52333988ebdc218f14c833237a0804f1b \ + --hash=sha256:5c7b02525ede2a164c5fa5014915ba3591730f2cc831f5be9ff3b7fd3e30958e \ + 
--hash=sha256:5ef3fbccb4058355053c51b82fd3501a6e13dd808c8d8cd2561e610c5456013c \ + --hash=sha256:5f20cb0a63a3e0ec4e169aa8890e32b949c8145983afa13a708bc4b0a1f30e03 \ + --hash=sha256:61405ea2d563407d316c63a7b5271ae5d274a2a9fbcd01b0aa5503635699fa1e \ + --hash=sha256:77d29cb6c34b14af8a484e831ab530c0f7188f8efed1c6a833a2c674bf3c26ec \ + --hash=sha256:7b184e3de58009cc0bf32e20f137f1ec75a32470f5fede06c58f6c355ed42a72 \ + --hash=sha256:7e614d7a25a43a9f54fcce4675c12761b248547f3d41b195e8010ca7297c369c \ + --hash=sha256:8197d6f7a3d2b468861ebb4c9f998b9df9e358d6e1cf9c2a01061cb9b6cf4e41 \ + --hash=sha256:87a1d53a5382cdbbf4b7619f107cc862c1b0a4feb29000922db72e5a66a5ffc0 \ + --hash=sha256:8c37f1050feb91f3d6c32f864d8e114ff5545a4a7afe56778d76a9aec62638ba \ + --hash=sha256:90453597a753322d6aa770c5935887ab1fc49cc4c4fdd436901308383d698b4b \ + --hash=sha256:988569c8732f54ad3234cf9c561364221a9e943b78dc7a4aaf35ccc2265f1930 \ + --hash=sha256:99a1e69d4e26f71e750e9ad6fdc8614fbddb67cfe2173a3628a2566034e223c7 \ + --hash=sha256:9b19836ccca0d321e237560e475fd99c3d8655d03da80c845c4da20dda31b6e1 \ + --hash=sha256:9d6753305936eddc8ed190e006b7bb33a8f50b9854823485eed3a886857ab8d1 \ + --hash=sha256:a13b917b4ffe5a0a31b83d051d60477819ddf18276852ea68037a144a506efb9 \ + --hash=sha256:a88913000da9205b13f6f195f0813b6ffd8a0c0c2bd58d499e00a30eb508870c \ + --hash=sha256:b2a0e3cf0caac2085ff172c3faacd1e00c376e6884b5bc4dd5b6b84623e29e4f \ + --hash=sha256:b5d7ed79df55a731749ce65ec20d666d82b185fa4898430b17cb90c892741520 \ + --hash=sha256:bab41acf151cd68bc2b466deae5deeb9e8ae9c50ad113444151ad965d5bf685b \ + --hash=sha256:bd9566b8e58cabd700bc367b60e90d9349cd16f0984973f98a9a09f9c64e86f0 \ + --hash=sha256:bda7ce59b06d0f09afe22c56714c65c957b1068dee3d5e74d743edec7daba552 \ + --hash=sha256:c2f9c762a2735600654c654bf48dad388b888f8ce387b095806480e6e4ff6907 \ + --hash=sha256:c4520047006b1d3f0d89e0532978c0688219857eb2fee7c48052560ae76aca1e \ + --hash=sha256:d96710d834a6fb31e21381c6d7b76ec729bd08c75a25a5184b1089141356171f \ + --hash=sha256:dba622396a3170974f81bad49aacebd243455ec3cc70615aeaef9e9613b5bca5 \ + --hash=sha256:dc4ee2d4ee43251905f88637d5281a8d52e916a021384ec10758826f5cbae305 \ + --hash=sha256:dddaae9b81c88083e6437de95c41e86823d150f4ee94bf24e158a4526cbead01 \ + --hash=sha256:de7202ffe4d4a8c1e3cde1c03e01c1a3772c92858837e8f3879b497158e4cb44 \ + --hash=sha256:e5bbe55e8552019c6463709b39634a5fc55e080d0827e2a3a11e18eb73f5cdbd \ + --hash=sha256:ea311d4ee9a8fa67f139c088ae9f905fcf0277d6cd75c310a21a88bf85e130f5 \ + --hash=sha256:fecd5089c4be1bcc37c35e9aa678938d2888845a134dd016de457b942cf5a758 + # via -r ./requirements/requirements.in +sqlparse==0.5.0 \ + --hash=sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93 \ + --hash=sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663 + # via + # -r ./requirements/requirements.in + # django +text-unidecode==1.3 \ + --hash=sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8 \ + --hash=sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93 + # via python-slugify +typing-extensions==4.11.0 \ + --hash=sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0 \ + --hash=sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a + # via + # dj-database-url + # pydantic + # pydantic-core + # pydash + # sqlalchemy +tzdata==2024.1 \ + --hash=sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd \ + --hash=sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252 + # via + # django + # pandas 
+uritemplate==4.1.1 \ + --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ + --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e + # via -r ./requirements/requirements.in +urllib3==2.2.2 \ + --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ + --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 + # via + # -r ./requirements/requirements.in + # botocore + # requests +zope-event==5.0 \ + --hash=sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26 \ + --hash=sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd + # via gevent +zope-interface==6.3 \ + --hash=sha256:014bb94fe6bf1786da1aa044eadf65bc6437bcb81c451592987e5be91e70a91e \ + --hash=sha256:01a0b3dd012f584afcf03ed814bce0fc40ed10e47396578621509ac031be98bf \ + --hash=sha256:10cde8dc6b2fd6a1d0b5ca4be820063e46ddba417ab82bcf55afe2227337b130 \ + --hash=sha256:187f7900b63845dcdef1be320a523dbbdba94d89cae570edc2781eb55f8c2f86 \ + --hash=sha256:1b0c4c90e5eefca2c3e045d9f9ed9f1e2cdbe70eb906bff6b247e17119ad89a1 \ + --hash=sha256:22e8a218e8e2d87d4d9342aa973b7915297a08efbebea5b25900c73e78ed468e \ + --hash=sha256:26c9a37fb395a703e39b11b00b9e921c48f82b6e32cc5851ad5d0618cd8876b5 \ + --hash=sha256:2bb78c12c1ad3a20c0d981a043d133299117b6854f2e14893b156979ed4e1d2c \ + --hash=sha256:2c3cfb272bcb83650e6695d49ae0d14dd06dc694789a3d929f23758557a23d92 \ + --hash=sha256:2f32010ffb87759c6a3ad1c65ed4d2e38e51f6b430a1ca11cee901ec2b42e021 \ + --hash=sha256:3c8731596198198746f7ce2a4487a0edcbc9ea5e5918f0ab23c4859bce56055c \ + --hash=sha256:40aa8c8e964d47d713b226c5baf5f13cdf3a3169c7a2653163b17ff2e2334d10 \ + --hash=sha256:4137025731e824eee8d263b20682b28a0bdc0508de9c11d6c6be54163e5b7c83 \ + --hash=sha256:46034be614d1f75f06e7dcfefba21d609b16b38c21fc912b01a99cb29e58febb \ + --hash=sha256:483e118b1e075f1819b3c6ace082b9d7d3a6a5eb14b2b375f1b80a0868117920 \ + --hash=sha256:4d6b229f5e1a6375f206455cc0a63a8e502ed190fe7eb15e94a312dc69d40299 \ + --hash=sha256:567d54c06306f9c5b6826190628d66753b9f2b0422f4c02d7c6d2b97ebf0a24e \ + --hash=sha256:5683aa8f2639016fd2b421df44301f10820e28a9b96382a6e438e5c6427253af \ + --hash=sha256:600101f43a7582d5b9504a7c629a1185a849ce65e60fca0f6968dfc4b76b6d39 \ + --hash=sha256:62e32f02b3f26204d9c02c3539c802afc3eefb19d601a0987836ed126efb1f21 \ + --hash=sha256:69dedb790530c7ca5345899a1b4cb837cc53ba669051ea51e8c18f82f9389061 \ + --hash=sha256:72d5efecad16c619a97744a4f0b67ce1bcc88115aa82fcf1dc5be9bb403bcc0b \ + --hash=sha256:8d407e0fd8015f6d5dfad481309638e1968d70e6644e0753f229154667dd6cd5 \ + --hash=sha256:a058e6cf8d68a5a19cb5449f42a404f0d6c2778b897e6ce8fadda9cea308b1b0 \ + --hash=sha256:a1adc14a2a9d5e95f76df625a9b39f4709267a483962a572e3f3001ef90ea6e6 \ + --hash=sha256:a56fe1261230093bfeedc1c1a6cd6f3ec568f9b07f031c9a09f46b201f793a85 \ + --hash=sha256:ad4524289d8dbd6fb5aa17aedb18f5643e7d48358f42c007a5ee51a2afc2a7c5 \ + --hash=sha256:afa0491a9f154cf8519a02026dc85a416192f4cb1efbbf32db4a173ba28b289a \ + --hash=sha256:bf34840e102d1d0b2d39b1465918d90b312b1119552cebb61a242c42079817b9 \ + --hash=sha256:c40df4aea777be321b7e68facb901bc67317e94b65d9ab20fb96e0eb3c0b60a1 \ + --hash=sha256:d0e7321557c702bd92dac3c66a2f22b963155fdb4600133b6b29597f62b71b12 \ + --hash=sha256:d165d7774d558ea971cb867739fb334faf68fc4756a784e689e11efa3becd59e \ + --hash=sha256:e78a183a3c2f555c2ad6aaa1ab572d1c435ba42f1dc3a7e8c82982306a19b785 \ + --hash=sha256:e8fa0fb05083a1a4216b4b881fdefa71c5d9a106e9b094cd4399af6b52873e91 \ + 
--hash=sha256:f83d6b4b22262d9a826c3bd4b2fbfafe1d0000f085ef8e44cd1328eea274ae6a \ + --hash=sha256:f95bebd0afe86b2adc074df29edb6848fc4d474ff24075e2c263d698774e108d + # via gevent + +# The following packages are considered to be unsafe in a requirements file: +setuptools==71.0.2 \ + --hash=sha256:ca359bea0cd5c8ce267d7463239107e87f312f2e2a11b6ca6357565d82b6c0d7 \ + --hash=sha256:f6640114f96be808024fbd1f721161215543796d3a68da4524349de700604ce8 + # via + # fs + # zope-event + # zope-interface diff --git a/backend/support/api/admin_api_v1_1_0/create_access_tables.sql b/backend/support/api/admin_api_v1_1_0/create_access_tables.sql index 6668c36aad..18db91956d 100644 --- a/backend/support/api/admin_api_v1_1_0/create_access_tables.sql +++ b/backend/support/api/admin_api_v1_1_0/create_access_tables.sql @@ -1,43 +1,43 @@ --- This is explicitly not a Django managed table. --- In order to have an administrative key added, --- it must be added via a Github commit, and a PR --- must be performed to merge the key into the tree. - --- This is because administrative keys can read/write --- to some tables in the database. They can read internal and --- in-flight data. - -DROP TABLE IF EXISTS support_administrative_key_uuids; - -CREATE TABLE support_administrative_key_uuids - ( - id BIGSERIAL PRIMARY KEY, - email TEXT, - uuid TEXT, - permissions TEXT, - added DATE - ); - -INSERT INTO support_administrative_key_uuids - (email, uuid, permissions, added) - VALUES - ( - 'matthew.jadud@gsa.gov', - '61ba59b2-f545-4c2f-9b24-9655c706a06c', - 'CREATE,READ,DELETE', - '2023-12-04' - ), - ( - 'daniel.swick@gsa.gov', - 'b6e08808-ecb2-4b6a-b928-46d4205497ff', - 'CREATE,READ,DELETE', - '2023-12-08' - ), - ( - 'fac-gov-test-users+api-tester-admin@gsa.gov', - 'dd60c3f9-053d-4d82-a309-c89da53559f4', - 'CREATE,READ,DELETE', - '2024-07-10' - ) - ; - +-- This is explicitly not a Django managed table. +-- In order to have an administrative key added, +-- it must be added via a Github commit, and a PR +-- must be performed to merge the key into the tree. + +-- This is because administrative keys can read/write +-- to some tables in the database. They can read internal and +-- in-flight data. + +DROP TABLE IF EXISTS support_administrative_key_uuids; + +CREATE TABLE support_administrative_key_uuids + ( + id BIGSERIAL PRIMARY KEY, + email TEXT, + uuid TEXT, + permissions TEXT, + added DATE + ); + +INSERT INTO support_administrative_key_uuids + (email, uuid, permissions, added) + VALUES + ( + 'matthew.jadud@gsa.gov', + '61ba59b2-f545-4c2f-9b24-9655c706a06c', + 'CREATE,READ,DELETE', + '2023-12-04' + ), + ( + 'daniel.swick@gsa.gov', + 'b6e08808-ecb2-4b6a-b928-46d4205497ff', + 'CREATE,READ,DELETE', + '2023-12-08' + ), + ( + 'fac-gov-test-users+api-tester-admin@gsa.gov', + 'dd60c3f9-053d-4d82-a309-c89da53559f4', + 'CREATE,READ,DELETE', + '2024-07-10' + ) + ; + diff --git a/docs/architecture/decisions/0034-handling-invalid-audit-report-during-migration.md b/docs/architecture/decisions/0034-handling-invalid-audit-report-during-migration.md index df48443be1..b317983c4d 100644 --- a/docs/architecture/decisions/0034-handling-invalid-audit-report-during-migration.md +++ b/docs/architecture/decisions/0034-handling-invalid-audit-report-during-migration.md @@ -1,92 +1,92 @@ -# 34. 
Handling Incorrect Audit Reports During Migration - -Date: 2024-05-17 - -## Status - -Accepted - -## Areas of impact - -- [ ] Compliance -- [ ] Content -- [ ] CX -- [ ] Design -- [x] Engineering -- [ ] Policy -- [ ] Product -- [x] Process -- [ ] UX - -## Related documents/links -- [Django models](https://github.com/GSA-TTS/FAC/issues/3681) -- Other related tickets include: #3856, #3709, #3694, #3688, #3686, #3696 - -## Context - -As we migrate audit reports from Census table files to the GSA/FAC database, we encounter a subset of reports with data validation issues that prevent their easy curation during the migration process. The complexity and nature of these data issues necessitate a strategy where these reports are migrated "as is", without undergoing validation or correction initially. This approach is deemed necessary to ensure all records are preserved intact for subsequent analysis and possible rectification. - -## Decision - -We propose to introduce two new Django models to handle these specific cases effectively: - -1. **InvalidAuditRecord**: - - This model acts similarly to the existing `MigrationInspectionRecord`, but instead of holding change records, it holds records that have been migrated as is, without validation or changes. - - The model will record the basic metadata of each report and the specific reasons it was migrated without validation. - -2. **IssueDescriptionRecord**: - - This model will capture detailed descriptions of the issues associated with each record, explaining why the reports could not be validated or corrected during the initial migration. - - Each `InvalidAuditRecord` can have one or more associated `IssueDescriptionRecord` entries, establishing a parent-child relationship. - -### Proposed Model Structures - -#### InvalidAuditRecord -```python -class InvalidAuditRecord(models.Model): - audit_year = models.TextField(blank=True, null=True) - dbkey = models.TextField(blank=True, null=True) - report_id = models.TextField(blank=True, null=True) - run_datetime = models.DateTimeField(default=timezone.now) - finding_text = models.JSONField(blank=True, null=True) # Described below - additional_uei = models.JSONField(blank=True, null=True) - additional_ein = models.JSONField(blank=True, null=True) - finding = models.JSONField(blank=True, null=True) - federal_award = models.JSONField(blank=True, null=True) - cap_text = models.JSONField(blank=True, null=True) - note = models.JSONField(blank=True, null=True) - passthrough = models.JSONField(blank=True, null=True) - general = models.JSONField(blank=True, null=True) - secondary_auditor = models.JSONField(blank=True, null=True) - -``` - -#### IssueDescriptionRecord -```python -class IssueDescriptionRecord(models.Model): - issue_detail = models.TextField() - issue_tag = models.TextField() - skipped_validation_method = models.TextField() - -``` - -### JSONField Content Structure for `InvalidAuditRecord` -```json -{ - "census_data": [{ - "value": "some value", - "column": "some column name" - } - ], - "issue_tag": "some_tag_name" -} -``` - -## Consequences - -By adopting this two-model approach: - -- We ensure that all audit reports, regardless of their initial data quality, are migrated and stored within the new system. -- The detailed logging and description of issues provide a robust foundation for future efforts aimed at correcting or understanding the compromised data.
-- This approach also allows for greater flexibility and clarity in handling complex data issues, as each problematic record's specifics are documented thoroughly and can be addressed individually when resources or solutions become available. - -This ADR provides a structured and transparent method for managing the migration of problematic audit reports, ensuring accountability and traceability throughout the process. +# 34. Handling Incorrect Audit Reports During Migration + +Date: 2024-05-17 + +## Status + +Accepted + +## Areas of impact + +- [ ] Compliance +- [ ] Content +- [ ] CX +- [ ] Design +- [x] Engineering +- [ ] Policy +- [ ] Product +- [x] Process +- [ ] UX + +## Related documents/links +- [Django models](https://github.com/GSA-TTS/FAC/issues/3681) +- Other related tickets include: #3856, #3709, #3694, #3688, #3686, #3696 + +## Context + +As we migrate audit reports from Census table files to the GSA/FAC database, we encounter a subset of reports with data validation issues that prevent their easy curation during the migration process. The complexity and nature of these data issues necessitate a strategy where these reports are migrated "as is", without undergoing validation or correction initially. This approach is deemed necessary to ensure all records are preserved intact for subsequent analysis and possible rectification. + +## Decision + +We propose to introduce two new Django models to handle these specific cases effectively: + +1. **InvalidAuditRecord**: + - This model acts similarly to the existing `MigrationInspectionRecord`, but instead of holding change records, it holds records that have been migrated as is, without validation or changes. + - The model will record the basic metadata of each report and the specific reasons it was migrated without validation. + +2. **IssueDescriptionRecord**: + - This model will capture detailed descriptions of the issues associated with each record, explaining why the reports could not be validated or corrected during the initial migration. + - Each `InvalidAuditRecord` can have one or more associated `IssueDescriptionRecord` entries, establishing a parent-child relationship (a sketch of this linkage follows below).
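+
+One way to realize the parent-child link (a minimal sketch; the `invalid_audit_record` field name, the `related_name`, and the `on_delete` choice below are illustrative assumptions layered on the `IssueDescriptionRecord` structure proposed in the next section, not part of this decision):
+
+```python
+from django.db import models
+
+
+class IssueDescriptionRecord(models.Model):
+    # Hypothetical foreign key: ties each issue description to the
+    # InvalidAuditRecord it annotates, so one parent record can carry
+    # many child issue descriptions.
+    invalid_audit_record = models.ForeignKey(
+        "InvalidAuditRecord",
+        on_delete=models.CASCADE,
+        related_name="issue_descriptions",
+    )
+    issue_detail = models.TextField()
+    issue_tag = models.TextField()
+    skipped_validation_method = models.TextField()
+```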
+ +### Proposed Model Structures + +#### InvalidAuditRecord +```python +class InvalidAuditRecord(models.Model): + audit_year = models.TextField(blank=True, null=True) + dbkey = models.TextField(blank=True, null=True) + report_id = models.TextField(blank=True, null=True) + run_datetime = models.DateTimeField(default=timezone.now) + finding_text = models.JSONField(blank=True, null=True) # Described below + additional_uei = models.JSONField(blank=True, null=True) + additional_ein = models.JSONField(blank=True, null=True) + finding = models.JSONField(blank=True, null=True) + federal_award = models.JSONField(blank=True, null=True) + cap_text = models.JSONField(blank=True, null=True) + note = models.JSONField(blank=True, null=True) + passthrough = models.JSONField(blank=True, null=True) + general = models.JSONField(blank=True, null=True) + secondary_auditor = models.JSONField(blank=True, null=True) + +``` + +#### IssueDescriptionRecord +```python +class IssueDescriptionRecord(models.Model): + issue_detail = models.TextField() + issue_tag = models.TextField() + skipped_validation_method = models.TextField() + +``` + +### JSONField Content Structure for `InvalidAuditRecord` +```json +{ + "census_data": [{ + "value": "some value", + "column": "some column name" + } + ], + "issue_tag": "some_tag_name" +} +``` + +## Consequences + +By adopting this two-model approach: + +- We ensure that all audit reports, regardless of their initial data quality, are migrated and stored within the new system. +- The detailed logging and description of issues provide a robust foundation for future efforts aimed at correcting or understanding the compromised data. +- This approach also allows for greater flexibility and clarity in handling complex data issues, as each problematic record's specifics are documented thoroughly and can be addressed individually when resources or solutions become available. + +This ADR provides a structured and transparent method for managing the migration of problematic audit reports, ensuring accountability and traceability throughout the process. diff --git a/docs/architecture/diagrams/Application/fac_system_context_unified.md index 7610ee6c0a..4b86f4fb81 100644 --- a/docs/architecture/diagrams/Application/fac_system_context_unified.md +++ b/docs/architecture/diagrams/Application/fac_system_context_unified.md @@ -1,89 +1,89 @@ -![FAC.gov Cloud ATO boundary view] - -```plantuml -@startuml -!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Container.puml - -Person(User, "User", "GranteeOrAuditor") -Person(Public, "User", "Public") -Person(Staff, "User", "FAC Staff") -Person(AgencyApp, "App", "Agency App") -Person(SecOps, "App", "GSA Sec Ops") - -note as ConnectionNote -All connections depicted are encrypted with TLS 1.2 unless otherwise noted. -All connections are on port 443 and use https unless otherwise noted. -All connections use TCP.
-end note - - -Boundary(cloudgov, "Cloud.gov Boundary") { - Boundary(atob, "ATO Boundary") { - Boundary(backend, "FAC application", "egress-controlled-space") { - System(api, "REST API ", "PostgREST") - System(django, "FAC Web App", "Django") - Boundary(services, "FAC Internal Services") { - System(scan, "Virus Scanner", "ClamAV") - } - System(scanner, "FAC File Scanner", "Python") - } - Boundary(proxy, "Proxy services", "egress-permitted-space"){ - System(https_proxy, "web egress proxy", "proxy for HTTP/S connections") - System(mail_proxy, "mail egress proxy", "proxy for SMTPS connections") - } - Boundary(pages, "cloud.gov pages") { - System(static, "FAC Static Site", "CG Pages") - } - Boundary(cloudgov-services,"Cloud.gov services") { - System(db, "Database", "Brokered PostgreSQL") - System(s3, "PDF/XLS storage", "Brokered S3") - System(Logs, "System Logs", "Brokered S3") - } - } -} - - System(Login, "Login.gov", "ID provider") - System(datagov, "api.data.gov", "Access Provider") - System(samgov, "SAM.gov", "UEI Source") - System(Email, "GSA Email") - System(relic, "New Relic", "Telemetry site") - System(dap, "DAP", "Access analytics") - System(clamav, "ClamAV Provider", "Vulnerability data provider") - - -AddRelTag("authenticated", $lineColor="#008787", $textColor="#008787") -Rel(User, django, "Submits/edits audits", $tags="authenticated") app.fac.gov -Rel(User, static, "Views static content") fac.gov -Rel(User, dap, "Logs user visit data") -Rel(Public, django, "Searches for/reads information") -Rel(Staff, django, "Manages audits, roles, content", $tags="authenticated") - -Rel(User, Login, "Authenticates with") -Rel(Staff, Login, "Authenticates with") -Rel(AgencyApp, datagov, "Routes requests through") api.fac.gov -Rel(SecOps,Logs, "Retrieves logs for mining") - - -Rel(datagov, api, "Searches, filters, requests audit", $tags="authenticated") port 3000 -Rel(Login, django, "Authenticates", "email address") - -Rel(api, db, "Fetches (read-only) Audits") port 5432 -Rel(AgencyApp, s3, "Fetches (read-only) PDF Docs") - -Rel(django, https_proxy, "Uses external services (Clam DB, SAM.gov, New Relic)") -Rel(https_proxy, samgov, "Looks up UEI info") -Rel(https_proxy, clamav, "Retrieves vulnerability data") port 8080 -Rel(https_proxy, relic, "Logs telemetry data") -Rel(django, mail_proxy, "Sends emails using") -Rel(mail_proxy, Email, "Sends emails using") port 587 -Rel(django, scan, "Scans attachments") -Rel(django, db, "read/write") port 5432 -Rel(django, s3, "Stores single audit packages/Excel files") -Rel(django, Logs, "Gathers and stores logs") -Rel(django, api, "Handles search requests") port 3000 -Rel(scanner, scan, "Scans files") -Rel(scanner, s3, "Reads files from s3") -Rel(scanner, db, "Writes scan results to table") -Rel(scanner, https_proxy, "Reports logs to New Relic") -@enduml -``` \ No newline at end of file +![FAC.gov Cloud ATO boundary view] + +```plantuml +@startuml +!include https://raw.githubusercontent.com/plantuml-stdlib/C4-PlantUML/master/C4_Container.puml + +Person(User, "User", "GranteeOrAuditor") +Person(Public, "User", "Public") +Person(Staff, "User", "FAC Staff") +Person(AgencyApp, "App", "Agency App") +Person(SecOps, "App", "GSA Sec Ops") + +note as ConnectionNote +All connections depicted are encrypted with TLS 1.2 unless otherwise noted. +All connections are on port 443 and use https unless otherwise noted. +All connections use TCP.
+end note + + +Boundary(cloudgov, "Cloud.gov Boundary") { + Boundary(atob, "ATO Boundary") { + Boundary(backend, "FAC application", "egress-controlled-space") { + System(api, "REST API ", "PostgREST") + System(django, "FAC Web App", "Django") + Boundary(services, "FAC Internal Services") { + System(scan, "Virus Scanner", "ClamAV") + } + System(scanner, "FAC File Scanner", "Python") + } + Boundary(proxy, "Proxy services", "egress-permitted-space"){ + System(https_proxy, "web egress proxy", "proxy for HTTP/S connections") + System(mail_proxy, "mail egress proxy", "proxy for SMTPS connections") + } + Boundary(pages, "cloud.gov pages") { + System(static, "FAC Static Site", "CG Pages") + } + Boundary(cloudgov-services,"Cloud.gov services") { + System(db, "Database", "Brokered PostgreSQL") + System(s3, "PDF/XLS storage", "Brokered S3") + System(Logs, "System Logs", "Brokered S3") + } + } +} + + System(Login, "Login.gov", "ID provider") + System(datagov, "api.data.gov", "Access Provider") + System(samgov, "SAM.gov", "UEI Source") + System(Email, "GSA Email") + System(relic, "New Relic", "Telemetry site") + System(dap, "DAP", "Access analytics") + System(clamav, "ClamAV Provider", "Vulnerability data provider") + + +AddRelTag("authenticated", $lineColor="#008787", $textColor="#008787") +Rel(User, django, "Submits/edits audits", $tags="authenticated") app.fac.gov +Rel(User, static, "Views static content") fac.gov +Rel(User, dap, "Logs user visit data") +Rel(Public, django, "Searches for/reads information") +Rel(Staff, django, "Manages audits, roles, content", $tags="authenticated") + +Rel(User, Login, "Authenticates with") +Rel(Staff, Login, "Authenticates with") +Rel(AgencyApp, datagov, "Routes requests through") api.fac.gov +Rel(SecOps,Logs, "Retrieves logs for mining") + + +Rel(datagov, api, "Searches, filters, requests audit", $tags="authenticated") port 3000 +Rel(Login, django, "Authenticates", "email address") + +Rel(api, db, "Fetches (read-only) Audits") port 5432 +Rel(AgencyApp, s3, "Fetches (read-only) PDF Docs") + +Rel(django, https_proxy, "Uses external services (Clam DB, SAM.gov, New Relic)") +Rel(https_proxy, samgov, "Looks up UEI info") +Rel(https_proxy, clamav, "Retrieves vulnerability data") port 8080 +Rel(https_proxy, relic, "Logs telemetry data") +Rel(django, mail_proxy, "Sends emails using") +Rel(mail_proxy, Email, "Sends emails using") port 587 +Rel(django, scan, "Scans attachments") +Rel(django, db, "read/write") port 5432 +Rel(django, s3, "Stores single audit packages/Excel files") +Rel(django, Logs, "Gathers and stores logs") +Rel(django, api, "Handles search requests") port 3000 +Rel(scanner, scan, "Scans files") +Rel(scanner, s3, "Reads files from s3") +Rel(scanner, db, "Writes scan results to table") +Rel(scanner, https_proxy, "Reports logs to New Relic") +@enduml +``` diff --git a/docs/backups_and_restores.md index 307c319afb..9019cae010 100644 --- a/docs/backups_and_restores.md +++ b/docs/backups_and_restores.md @@ -1,89 +1,89 @@ -### Preparation steps -```sh -cf t -o -s -``` - -Bind the backups bucket to the application -```sh -cf bind-service gsa-fac backups -``` - -Restart the app so the changes take effect and wait for the instance to come back up -```sh -cf restart gsa-fac --strategy rolling -``` - -Unbind the existing fac-private-s3 bucket from the app -```sh -cf unbind-service gsa-fac fac-private-s3 -``` - -Rebind the fac-private-s3 bucket with the backups bucket as an additional instance -```sh -cf bind-service gsa-fac fac-private-s3 -c
'{"additional_instances": ["backups"]}' -``` - -Restart the app so changes occur and wait for the instance to come back up -```sh -cf restart gsa-fac --strategy rolling -``` - -### Database Backups - -Information regarding the fac-backup-utility can be found [at the repository](https://github.com/GSA-TTS/fac-backup-utility). -Database backups occur in the following ways: -1. An initial backup, where a backup has not been run in the target environment. This input of `initial_backup` is important, as when it does a the `db_to_db` command, it will not truncate the target table, as the table does not exist in the destination database. -```bash -./fac-backup-util.sh v0.1.5 initial_backup -# Curl the utility -# Install AWS -# DB to S3 table dump (backups) -# DB to DB table dump (fac-db -> fac-snapshot-db) [No Truncate, as tables dont exist] -# AWS S3 sync (fac-private-s3 -> backups) -``` - -2. A deploy backup, where the `db_to_db` function is not called. This is a standard backup strategy before the application deploys, to ensure the s3 contents of the primary s3 are sync'd to the backups bucket, and a table dump is stored in the backups bucket. -```bash -./fac-backup-util.sh v0.1.5 deploy_backup -# Curl the utility -# Install AWS -# DB to S3 table dump (backups) -# AWS S3 sync (fac-private-s3 -> backups) -``` - -3. A scheduled backup is run every two hours, across each environment, ensuring that we have a clean backup in s3, rds, and the bucket contents are in sync. -```bash -./fac-backup-util.sh v0.1.5 scheduled_backup -# Curl the utility -# Install AWS -# DB to S3 table dump (fac-db -> backups) -# DB to DB table dump (fac-db -> fac-snapshot-db) [Truncate target table before dump] -# AWS S3 sync (fac-private-s3 -> backups) -``` - -### Restoring -Restoring from backups can be run via workflow, from designated individuals. There are two paths that we can restore from. - -1. S3 Restore takes a `operation-mm-DD-HH` input (ex `scheduled-06-04-10`), and is required for the backups to be restored. The utility looks in `s3://${bucket}/backups/operation-mm-DD-HH/` for its table dumps, and without supplying the target backups, it will not restore. Once it does a `--data-only` restoration, it will then sync the files from the backups bucket to the application bucket. We do this to ensure the contents of the application bucket are up to date, relative to the data in the database. We know that if we use the latest folder in `/backups/` then the contents of the s3 are the latest available, from the prior backup. -```bash -./fac-restore-util.sh v0.1.5 s3_restore scheduled-06-04-10 -# Curl the utility -# Install AWS -# DB to S3 table dump (backups -> fac-db) [Truncate target table before --data-only pg_restore] -# AWS S3 sync (backups -> fac-private-s3) -``` -Potential Options for restoration: -```bash -initial-YYYYmmddHHMM -scheduled-mm-dd-HH -daily-mm-dd -``` - -2. Database to database restoration also can occur as well, using `psql` to dump the tables from the cold store database to the live database. 
-```bash -./fac-restore-util.sh v0.1.5 db_restore -# Curl the utility -# Install AWS -# DB to DB table dump (fac-snapshot-db -> fac-db) [Truncate target table before dump] -# AWS S3 sync (fac-private-s3 -> backups) -``` +### Preparation steps +```sh +cf t -o -s +``` + +Bind the backups bucket to the application +```sh +cf bind-service gsa-fac backups +``` + +Restart the app so the changes take effect and wait for the instance to come back up +```sh +cf restart gsa-fac --strategy rolling +``` + +Unbind the existing fac-private-s3 bucket from the app +```sh +cf unbind-service gsa-fac fac-private-s3 +``` + +Rebind the fac-private-s3 bucket with the backups bucket as an additional instance +```sh +cf bind-service gsa-fac fac-private-s3 -c '{"additional_instances": ["backups"]}' +``` + +Restart the app so the changes take effect and wait for the instance to come back up +```sh +cf restart gsa-fac --strategy rolling +``` + +### Database Backups + +Information regarding the fac-backup-utility can be found [at the repository](https://github.com/GSA-TTS/fac-backup-utility). +Database backups occur in the following ways: +1. An initial backup, used when a backup has not yet been run in the target environment. The `initial_backup` input is important: when the utility runs the `db_to_db` command, it will not truncate the target table, as the table does not yet exist in the destination database. +```bash +./fac-backup-util.sh v0.1.5 initial_backup +# Curl the utility +# Install AWS +# DB to S3 table dump (backups) +# DB to DB table dump (fac-db -> fac-snapshot-db) [No truncate, as tables don't exist] +# AWS S3 sync (fac-private-s3 -> backups) +``` + +2. A deploy backup, where the `db_to_db` function is not called. This is the standard backup strategy before the application deploys, ensuring the contents of the primary s3 bucket are synced to the backups bucket and a table dump is stored in the backups bucket. +```bash +./fac-backup-util.sh v0.1.5 deploy_backup +# Curl the utility +# Install AWS +# DB to S3 table dump (backups) +# AWS S3 sync (fac-private-s3 -> backups) +``` + +3. A scheduled backup runs every two hours in each environment, ensuring that we have a clean backup in s3 and rds, and that the bucket contents are in sync. +```bash +./fac-backup-util.sh v0.1.5 scheduled_backup +# Curl the utility +# Install AWS +# DB to S3 table dump (fac-db -> backups) +# DB to DB table dump (fac-db -> fac-snapshot-db) [Truncate target table before dump] +# AWS S3 sync (fac-private-s3 -> backups) +``` + +### Restoring +Restores from backups can be run via workflow by designated individuals. There are two paths we can restore from. + +1. S3 restore takes an `operation-mm-DD-HH` input (e.g. `scheduled-06-04-10`), which is required for the backups to be restored. The utility looks in `s3://${bucket}/backups/operation-mm-DD-HH/` for its table dumps; without the target backups supplied, it will not restore. After it performs a `--data-only` restoration, it syncs the files from the backups bucket to the application bucket. We do this to ensure the contents of the application bucket are up to date relative to the data in the database. If we use the latest folder in `/backups/`, we know the s3 contents are the latest available from the prior backup.
+```bash +./fac-restore-util.sh v0.1.5 s3_restore scheduled-06-04-10 +# Curl the utility +# Install AWS +# DB to S3 table dump (backups -> fac-db) [Truncate target table before --data-only pg_restore] +# AWS S3 sync (backups -> fac-private-s3) +``` +Potential options for restoration: +```bash +initial-YYYYmmddHHMM +scheduled-mm-dd-HH +daily-mm-dd +``` + +2. Database-to-database restoration can also occur, using `psql` to dump the tables from the cold-store database to the live database. +```bash +./fac-restore-util.sh v0.1.5 db_restore +# Curl the utility +# Install AWS +# DB to DB table dump (fac-snapshot-db -> fac-db) [Truncate target table before dump] +# AWS S3 sync (fac-private-s3 -> backups) +```
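+
+Before invoking the restore utility, it can help to confirm which backup folders actually exist. A minimal pre-flight sketch, assuming the AWS CLI is installed and `$bucket` holds the backups bucket name (both assumptions; the utility performs its own lookup):
+
+```bash
+# List the timestamped backup folders available as restore inputs,
+# e.g. scheduled-06-04-10 or daily-06-04.
+aws s3 ls "s3://${bucket}/backups/"
+```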