diff --git a/.github/workflows/auto-merge-staging-pr.yml b/.github/workflows/auto-merge-staging-pr.yml new file mode 100644 index 0000000000..5a77cf5ff8 --- /dev/null +++ b/.github/workflows/auto-merge-staging-pr.yml @@ -0,0 +1,19 @@ +--- +name: Auto Merge Pull Request With Approved Label +on: + repository_dispatch: + types: [ ready-to-merge ] +jobs: + auto-merge: + name: Auto Merge The Created Pull Request + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - id: automerge + name: Auto Merge a PR with the correct labels + uses: pascalgn/automerge-action@v0.15.6 + env: + GITHUB_TOKEN: ${{ secrets.DEPLOY_TOKEN }} + MERGE_LABELS: "automerge,autogenerated" + MERGE_METHOD: "merge" diff --git a/.github/workflows/create-pull-request-to-staging.yml b/.github/workflows/create-pull-request-to-staging.yml index cd48603b7d..14f0367727 100644 --- a/.github/workflows/create-pull-request-to-staging.yml +++ b/.github/workflows/create-pull-request-to-staging.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_TOKEN: ${{ secrets.DEPLOY_TOKEN }} jobs: check-commit: @@ -52,13 +52,24 @@ jobs: - name: Open Pull Request id: open-pr run: | - gh pr create --label "autogenerated" --title "${{ steps.date.outputs.date }} | MAIN --> PROD | DEV (${{ steps.git-short.outputs.short_sha }}) --> STAGING" --body "This is an auto-generated pull request to merge main into prod for a staging release on ${{ steps.date.outputs.date }} with the last commit being merged as ${{ steps.git-short.outputs.short_sha }}" --base prod --head main + gh pr create --label "autogenerated" --label "automerge" \ + --title "${{ steps.date.outputs.date }} | MAIN --> PROD | DEV (${{ steps.git-short.outputs.short_sha }}) --> STAGING" \ + --body "This is an auto-generated pull request to merge main into prod for a staging release on ${{ steps.date.outputs.date }} with the last commit being merged as ${{ steps.git-short.outputs.short_sha }}" \ + --base prod --head 
main - name: Get Pull Request Number id: pr-number run: echo "pull_request_number=$(gh pr view --json number -q .number || echo "")" >> $GITHUB_OUTPUT - - name: Comment on Pull Request - id: pr-comment - run: | - gh pr comment ${{ steps.pr-number.outputs.pull_request_number }} --body "Please close and re-open this pull request to ensure that a terraform plan is generated on the PR for the staging deployment after merging." + auto-approve: + name: Auto Approve The Created Pull Request + needs: [pull-request] + if: ${{ needs.pull-request.outputs.pr_number != null }} + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - uses: hmarr/auto-approve-action@v3 + with: + pull-request-number: ${{ needs.pull-request.outputs.pr_number }} + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml new file mode 100644 index 0000000000..1dbc6e69f7 --- /dev/null +++ b/.github/workflows/create-release.yml @@ -0,0 +1,40 @@ +--- +name: Create Release +on: +# schedule: +# - cron: '0 18 * * 3' + workflow_dispatch: null + +jobs: + release: + permissions: write-all + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + ref: prod + + - name: Get Current Date + id: date + run: | + echo "date=$(date +%Y%m%d)" >> $GITHUB_OUTPUT + + - name: Changelog + uses: Bullrich/generate-release-changelog@master + id: changelog + env: + REPO: ${{ github.repository }} + + - name: Create Release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v1.${{ steps.date.outputs.date }} + release_name: Release for ${{ steps.date.outputs.date }} + body: | + ${{ steps.changelog.outputs.changelog }} + draft: false + prerelease: true diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml index 9f9a8478d0..fd1acd2f8c 100644 --- a/.github/workflows/pull-request-checks.yml 
+++ b/.github/workflows/pull-request-checks.yml @@ -86,3 +86,19 @@ jobs: with: environment: "production" secrets: inherit + + repo-event: + if: ${{ github.base_ref == 'prod' }} + name: Set Repository Event + permissions: + contents: write + runs-on: ubuntu-latest + needs: [terraform-plan-staging, testing-from-ghcr] + steps: + - name: Repository Dispatch + uses: peter-evans/repository-dispatch@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + event-type: ready-to-merge + client-payload: '{"github": ${{ toJson(github) }}}' + if: github.event_name == 'pull_request' diff --git a/backend/api/views.py b/backend/api/views.py index d5d9f3f0cd..e6a4bae0ce 100644 --- a/backend/api/views.py +++ b/backend/api/views.py @@ -4,6 +4,7 @@ from django.http import Http404, HttpResponse, JsonResponse from django.urls import reverse from django.views import View, generic +from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError from rest_framework import viewsets from rest_framework.authentication import BaseAuthentication @@ -23,6 +24,8 @@ UEISerializer, ) +UserModel = get_user_model() + AUDITEE_INFO_PREVIOUS_STEP_DATA_WE_NEED = [ "user_provided_organization_type", "met_spending_threshold", @@ -128,6 +131,7 @@ def access_and_submission_check(user, data): role="certifying_auditor_contact", email=serializer.data.get("certifying_auditor_contact"), ) + for contact in serializer.data.get("auditee_contacts"): Access.objects.create(sac=sac, role="editor", email=contact) for contact in serializer.data.get("auditor_contacts"): diff --git a/backend/audit/cross_validation/__init__.py b/backend/audit/cross_validation/__init__.py index 6a59fde1f8..6a4716e847 100644 --- a/backend/audit/cross_validation/__init__.py +++ b/backend/audit/cross_validation/__init__.py @@ -55,11 +55,13 @@ ] """ -from .auditee_ueis_match import auditee_ueis_match from .additional_ueis import additional_ueis +from .auditee_ueis_match import auditee_ueis_match +from .audit_findings 
import audit_findings from .sac_validation_shape import sac_validation_shape # noqa: F401 functions = [ + audit_findings, auditee_ueis_match, additional_ueis, ] diff --git a/backend/audit/cross_validation/audit_findings.py b/backend/audit/cross_validation/audit_findings.py new file mode 100644 index 0000000000..edfbd10f64 --- /dev/null +++ b/backend/audit/cross_validation/audit_findings.py @@ -0,0 +1,28 @@ +def audit_findings(sac_dict): + """ + Checks that the number of audit findings in the Federal Awards and Audit Findings + sections are consistent. + """ + all_sections = sac_dict["sf_sac_sections"] + awards_outer = all_sections.get("federal_awards", {}) + awards = awards_outer.get("federal_awards", []) if awards_outer else [] + finds_outer = all_sections.get("findings_text", {}) + findings = finds_outer.get("findings_text_entries", []) if finds_outer else [] + # If both empty, that's consistent: + if not awards and not findings: + return [] + if awards: + # How to determine if findings is required? Loop through the awards? + num_expected = sum(_.get("number_of_audit_findings", 0) for _ in findings) + # For now, just check for non-zero + if num_expected and not awards: + return [ + { + "error": "There are findings listed in Federal Awards " + " but none in Findings Text" + } + ] + + if False > 1: + return [{"error": "error message"}] + return [] diff --git a/backend/audit/cross_validation/errors.py b/backend/audit/cross_validation/errors.py index 999a78332d..fde0aed512 100644 --- a/backend/audit/cross_validation/errors.py +++ b/backend/audit/cross_validation/errors.py @@ -20,6 +20,10 @@ def err_auditee_ueis_match(): return "Not all auditee UEIs matched." +def err_awards_findings_but_no_findings_text(): + return "There are findings indicated in Federal Awards but" "none in Findings Text." 
+ + def err_missing_tribal_data_sharing_consent(): return ( "As a tribal organization, you must complete the data " diff --git a/backend/audit/etl.py b/backend/audit/etl.py index b770dcca03..67fb02f87f 100644 --- a/backend/audit/etl.py +++ b/backend/audit/etl.py @@ -35,7 +35,7 @@ def load_all(self): self.load_passthrough, self.load_finding_texts, self.load_captext, - # self.load_audit_info() # TODO: Uncomment when SingleAuditChecklist adds audit_information + self.load_audit_info, ) for load_method in load_methods: try: @@ -47,6 +47,11 @@ def load_all(self): def load_finding_texts(self): findings_text = self.single_audit_checklist.findings_text + + if not findings_text: + logger.warning("No finding texts found to load") + return + findings_text_entries = findings_text["FindingsText"]["findings_text_entries"] for entry in findings_text_entries: finding_text_ = FindingText( @@ -61,6 +66,10 @@ def load_findings(self): findings_uniform_guidance = ( self.single_audit_checklist.findings_uniform_guidance ) + if not findings_uniform_guidance: + logger.warning("No findings found to load") + return + findings_uniform_guidance_entries = findings_uniform_guidance[ "FindingsUniformGuidance" ]["findings_uniform_guidance_entries"] @@ -86,6 +95,19 @@ def load_findings(self): def load_federal_award(self): federal_awards = self.single_audit_checklist.federal_awards + report_id = self.single_audit_checklist.report_id + try: + general = General.objects.get(report_id=report_id) + except General.DoesNotExist: + logger.error( + f"General must be loaded before FederalAward. 
report_id = {report_id}" + ) + return + general.total_amount_expended = federal_awards["FederalAwards"].get( + "total_amount_expended" + ) + general.save() + for entry in federal_awards["FederalAwards"]["federal_awards"]: program = entry["program"] loan = entry["loan_or_loan_guarantee"] @@ -113,17 +135,19 @@ def load_federal_award(self): is_loan=loan["is_guaranteed"] == "Y", loan_balance=loan["loan_balance_at_audit_period_end"], is_direct=is_direct, - is_major=program["is_major"] == "Y", mp_audit_report_type=program["audit_report_type"], findings_count=program["number_of_audit_findings"], is_passthrough_award=is_passthrough, passthrough_amount=subrecipient_amount, - type_requirement=None, # TODO: What is this? ) federal_award.save() def load_captext(self): corrective_action_plan = self.single_audit_checklist.corrective_action_plan + if not corrective_action_plan: + logger.warning("No corrective action plans found to load") + return + corrective_action_plan_entries = corrective_action_plan["CorrectiveActionPlan"][ "corrective_action_plan_entries" ] @@ -138,6 +162,10 @@ def load_captext(self): def load_note(self): notes_to_sefa = self.single_audit_checklist.notes_to_sefa["NotesToSefa"] + if not notes_to_sefa: + logger.warning("No notes to sefa found to load") + return + accounting_policies = notes_to_sefa["accounting_policies"] is_minimis_rate_used = notes_to_sefa["is_minimis_rate_used"] == "Y" rate_explained = notes_to_sefa["rate_explained"] @@ -190,6 +218,13 @@ def load_revision(self): def load_passthrough(self): federal_awards = self.single_audit_checklist.federal_awards for entry in federal_awards["FederalAwards"]["federal_awards"]: + entities = ( + entry["direct_or_indirect_award"] + and entry["direct_or_indirect_award"]["entities"] + ) + if not entities: + logger.warning("No passthrough to load") + return for entity in entry["direct_or_indirect_award"]["entities"]: passthrough = Passthrough( award_reference=entry["award_reference"], @@ -274,9 +309,7 @@ def 
load_general(self): entity_type=general_information["user_provided_organization_type"], number_months=general_information["audit_period_other_months"], audit_period_covered=general_information["audit_period_covered"], - is_report_required=None, # TODO: Notes say this hasn't been used since 2008. - total_fed_expenditures=None, # TODO: Where does this come from? - type_report_major_program=None, # TODO: Where does this come from? + total_amount_expended=None, # loaded from FederalAward type_audit_code="UG", is_public=self.single_audit_checklist.is_public, data_source="G-FAC", @@ -285,6 +318,9 @@ def load_general(self): def load_secondary_auditor(self): secondary_auditors = self.single_audit_checklist.secondary_auditors + if not secondary_auditors: + logger.warning("No secondary_auditors found to load") + return for secondary_auditor in secondary_auditors["SecondaryAuditors"][ "secondary_auditors_entries" @@ -306,17 +342,26 @@ def load_secondary_auditor(self): sec_auditor.save() def load_audit_info(self): - general = General.objects.get(report_id=self.single_audit_checklist.report_id) + report_id = self.single_audit_checklist.report_id + try: + general = General.objects.get(report_id=report_id) + except General.DoesNotExist: + logger.error( + f"General must be loaded before AuditInfo. 
report_id = {report_id}" + ) + return audit_information = self.single_audit_checklist.audit_information - + if not audit_information: + logger.warning("No audit info found to load") + return general.gaap_results = audit_information["gaap_results"] - """ - TODO: - Missing in schema - general.sp_framework = audit_information[] - general.is_sp_framework_required = audit_information[] - general.sp_framework_auditor_opinion = audit_information[] - """ + general.sp_framework = audit_information["sp_framework_basis"] + general.is_sp_framework_required = ( + audit_information["is_sp_framework_required"] == "Y" + ) + general.sp_framework_auditor_opinion = audit_information[ + "sp_framework_opinions" + ] general.is_going_concern = audit_information["is_going_concern_included"] == "Y" general.is_significant_deficiency = ( audit_information["is_internal_control_deficiency_disclosed"] == "Y" diff --git a/backend/audit/fixtures/json/federal-awards--test0001test--simple-pass.json b/backend/audit/fixtures/json/federal-awards--test0001test--simple-pass.json new file mode 100644 index 0000000000..0969498bea --- /dev/null +++ b/backend/audit/fixtures/json/federal-awards--test0001test--simple-pass.json @@ -0,0 +1,42 @@ +{ + "FederalAwards": { + "auditee_uei": "TEST0001TEST", + "total_amount_expended": 12345, + "federal_awards": [ + { + "program": { + "federal_agency_prefix": "42", + "three_digit_extension": "RD", + "additional_award_identification": 1234, + "program_name": "FLOWER DREAMING", + "is_major": "N", + "audit_report_type": "", + "number_of_audit_findings": 0, + "amount_expended": 500, + "federal_program_total": 500 + }, + "loan_or_loan_guarantee": { + "is_guaranteed": "N", + "loan_balance_at_audit_period_end": 0 + }, + "direct_or_indirect_award": { + "is_direct": "N", + "entities": [ + { + "passthrough_name": "Lothlórien", + "passthrough_identifying_number": "54321" + } + ] + }, + "cluster": { + "cluster_name": "N/A", + "cluster_total": 0 + }, + "subrecipients": { + 
"is_passed": "N" + }, + "award_reference": "AWARD-0001" + } + ] + } +} diff --git a/backend/audit/fixtures/json/general-information--test0001test--simple-pass.json b/backend/audit/fixtures/json/general-information--test0001test--simple-pass.json new file mode 100644 index 0000000000..8be3fc1308 --- /dev/null +++ b/backend/audit/fixtures/json/general-information--test0001test--simple-pass.json @@ -0,0 +1,35 @@ +{ + "ein": "123456789", + "audit_type": "single-audit", + "auditee_uei": "TEST0001TEST", + "auditee_zip": "94109", + "auditor_ein": "987654321", + "auditor_zip": "94109", + "auditee_city": "HAL", + "auditee_name": "IBM", + "auditor_city": "Rivendell", + "is_usa_based": true, + "auditee_email": "fictional.fac.tester+test@gsa.gov", + "auditee_phone": "5558675309", + "auditee_state": "CA", + "auditor_email": "fictional.fac.tester+test@gsa.gov", + "auditor_phone": "5558675309", + "auditor_state": "CA", + "auditor_country": "United States", + "auditor_firm_name": "Fellowship", + "audit_period_covered": "annual", + "auditee_contact_name": "Frodo Baggins", + "auditor_contact_name": "Sauron", + "auditee_contact_title": "Ringbearer", + "auditor_contact_title": "Dark Lord", + "multiple_eins_covered": false, + "multiple_ueis_covered": false, + "auditee_address_line_1": "HAL", + "auditor_address_line_1": "Main Hall", + "met_spending_threshold": true, + "auditee_fiscal_period_end": "2023-01-01", + "ein_not_an_ssn_attestation": true, + "auditee_fiscal_period_start": "2022-01-01", + "user_provided_organization_type": "non-profit", + "auditor_ein_not_an_ssn_attestation": true +} diff --git a/backend/audit/fixtures/single_audit_checklist.py b/backend/audit/fixtures/single_audit_checklist.py index 8258904b67..00c4960647 100644 --- a/backend/audit/fixtures/single_audit_checklist.py +++ b/backend/audit/fixtures/single_audit_checklist.py @@ -76,6 +76,47 @@ def _fake_general_information(auditee_name=None): return general_information +def fake_auditor_certification(): + """Create 
fake auditor confirmation form data.""" + fake = Faker() + data_step_1 = { + "is_OMB_limited": True, + "is_auditee_responsible": True, + "has_used_auditors_report": True, + "has_no_auditee_procedures": True, + "is_FAC_releasable": True, + } + data_step_2 = { + "auditor_name": fake.name(), + "auditor_title": fake.job(), + "auditor_certification_date_signed": fake.date(), + } + + return data_step_1, data_step_2 + + +def fake_auditee_certification(): + """Create fake auditor confirmation form data.""" + fake = Faker() + data_step_1 = { + "has_no_PII": True, + "has_no_BII": True, + "meets_2CFR_specifications": True, + "is_2CFR_compliant": True, + "is_complete_and_accurate": True, + "has_engaged_auditor": True, + "is_issued_and_signed": True, + "is_FAC_releasable": True, + } + data_step_2 = { + "auditee_name": fake.name(), + "auditee_title": fake.job(), + "auditee_certification_date_signed": fake.date(), + } + + return data_step_1, data_step_2 + + def _create_sac(user, auditee_name): """Create a single example SAC.""" SingleAuditChecklist = apps.get_model("audit.SingleAuditChecklist") diff --git a/backend/audit/forms.py b/backend/audit/forms.py index d122f09799..78158e53cc 100644 --- a/backend/audit/forms.py +++ b/backend/audit/forms.py @@ -97,3 +97,34 @@ def clean_booleans(self): dollar_threshold = forms.IntegerField(min_value=1) is_low_risk_auditee = forms.MultipleChoiceField(choices=choices_YoN) agencies = forms.MultipleChoiceField(choices=choices_agencies) + + +class AuditorCertificationStep1Form(forms.Form): + is_OMB_limited = forms.BooleanField() + is_auditee_responsible = forms.BooleanField() + has_used_auditors_report = forms.BooleanField() + has_no_auditee_procedures = forms.BooleanField() + is_FAC_releasable = forms.BooleanField() + + +class AuditorCertificationStep2Form(forms.Form): + auditor_name = forms.CharField() + auditor_title = forms.CharField() + auditor_certification_date_signed = forms.DateField() + + +class 
AuditeeCertificationStep1Form(forms.Form): + has_no_PII = forms.BooleanField() + has_no_BII = forms.BooleanField() + meets_2CFR_specifications = forms.BooleanField() + is_2CFR_compliant = forms.BooleanField() + is_complete_and_accurate = forms.BooleanField() + has_engaged_auditor = forms.BooleanField() + is_issued_and_signed = forms.BooleanField() + is_FAC_releasable = forms.BooleanField() + + +class AuditeeCertificationStep2Form(forms.Form): + auditee_name = forms.CharField() + auditee_title = forms.CharField() + auditee_certification_date_signed = forms.DateField() diff --git a/backend/audit/migrations/0032_singleauditchecklist_auditee_certification_and_more.py b/backend/audit/migrations/0032_singleauditchecklist_auditee_certification_and_more.py new file mode 100644 index 0000000000..634e0c5884 --- /dev/null +++ b/backend/audit/migrations/0032_singleauditchecklist_auditee_certification_and_more.py @@ -0,0 +1,31 @@ +# Generated by Django 4.2.3 on 2023-08-07 18:21 + +import audit.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("audit", "0031_singleauditreportfile_component_page_numbers"), + ] + + operations = [ + migrations.AddField( + model_name="singleauditchecklist", + name="auditee_certification", + field=models.JSONField( + blank=True, + null=True, + validators=[audit.validators.validate_auditee_certification_json], + ), + ), + migrations.AddField( + model_name="singleauditchecklist", + name="auditor_certification", + field=models.JSONField( + blank=True, + null=True, + validators=[audit.validators.validate_auditor_certification_json], + ), + ), + ] diff --git a/backend/audit/migrations/0033_singleauditchecklist_tribal_data_consent.py b/backend/audit/migrations/0033_singleauditchecklist_tribal_data_consent.py new file mode 100644 index 0000000000..f8ab5dd211 --- /dev/null +++ b/backend/audit/migrations/0033_singleauditchecklist_tribal_data_consent.py @@ -0,0 +1,22 @@ +# Generated by Django 
4.2.3 on 2023-08-10 19:49 + +import audit.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("audit", "0032_singleauditchecklist_auditee_certification_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="singleauditchecklist", + name="tribal_data_consent", + field=models.JSONField( + blank=True, + null=True, + validators=[audit.validators.validate_tribal_data_consent_json], + ), + ), + ] diff --git a/backend/audit/models.py b/backend/audit/models.py index fa3ef138c0..91b2a16c48 100644 --- a/backend/audit/models.py +++ b/backend/audit/models.py @@ -26,6 +26,9 @@ validate_secondary_auditors_json, validate_notes_to_sefa_json, validate_single_audit_report_file, + validate_auditor_certification_json, + validate_auditee_certification_json, + validate_tribal_data_consent_json, validate_audit_information_json, validate_component_page_numbers, ) @@ -273,6 +276,18 @@ class STATUS: blank=True, null=True, validators=[validate_notes_to_sefa_json] ) + auditor_certification = models.JSONField( + blank=True, null=True, validators=[validate_auditor_certification_json] + ) + + auditee_certification = models.JSONField( + blank=True, null=True, validators=[validate_auditee_certification_json] + ) + + tribal_data_consent = models.JSONField( + blank=True, null=True, validators=[validate_tribal_data_consent_json] + ) + def validate_full(self): """ Full validation, intended for use when the user indicates that the diff --git a/backend/audit/templates/audit/auditee-certification-step-1.html b/backend/audit/templates/audit/auditee-certification-step-1.html new file mode 100644 index 0000000000..462eee45c6 --- /dev/null +++ b/backend/audit/templates/audit/auditee-certification-step-1.html @@ -0,0 +1,126 @@ +{% extends "base.html" %} +{% load static %} +{% block content %} +
Audit report PDF (Complete)
Upload the single audit report package, or audit report. This should be a single PDF that is unlocked and machine-readable.
- {% include './created-by.html' with time=audit_report.completed_date name=audit_report.created_by %} + {% include './created-by.html' with time=single_audit_report.completed_date name=single_audit_report.created_by %}Audit report PDF @@ -58,21 +58,21 @@ {% comment %} Workbook 1: Federal Awards {% endcomment %}
Workbook 1: Federal Awards (Complete)
For each federal award received, you'll need the financial and agency details. This is also where you list the number of audit findings.
- {% include './created-by.html' with time=federal_awards_workbook.completed_date name=federal_awards_workbook.created_by %} + {% include './created-by.html' with time=federal_awards.completed_date name=federal_awards.created_by %}
- {% if audit_findings_workbook.completed %}
- {% include './icon-list-icon.html' with completed=audit_findings_workbook.completed %}
+ {% if findings_uniform_guidance.completed %}
+ {% include './icon-list-icon.html' with completed=findings_uniform_guidance.completed %}
Workbook 2: Federal Awards Audit Findings (Complete)
Complete this section using the Summary Schedule of Prior Audit Findings and the information in the financial statement audit. If there are no audit findings listed in Workbook 1: Federal Awards, you do not need to complete this workbook.
- {% if findings_text_workbook.completed %}
- {% include './icon-list-icon.html' with completed=findings_text_workbook.completed %}
+ {% if findings_text.completed %}
+ {% include './icon-list-icon.html' with completed=findings_text.completed %}
Workbook 3: Federal Awards Audit Findings Text (Complete)
Enter the full text of the audit finding, listing each finding only once, even if they relate to more than one program. Include the audit finding reference number for each. If there are no audit findings listed in Workbook 1: Federal Awards, you do not need to complete this workbook.
- {% if CAP_workbook.completed %}
- {% include './icon-list-icon.html' with completed=CAP_workbook.completed %}
+ {% if corrective_action_plan.completed %}
+ {% include './icon-list-icon.html' with completed=corrective_action_plan.completed %}
Workbook 4: Corrective Action Plan (CAP) (Complete)
This information should match the data you entered in the Findings Text workbook. You only need to enter plans for findings once if they relate to more than one program. If there are no audit findings listed in Workbook 1: Federal Awards, you do not need to complete this workbook.
Upload Workbook 4: Corrective Action Plan (CAP)
@@ -176,20 +176,20 @@
{% comment %} Audit Information Form {% endcomment %}
Audit Information Form (Complete)
This form is completed by both the auditee and auditor. Auditees complete this section using the Summary Schedule of Prior Audit Findings and Auditors complete this section using the information in the financial statement audit.
Audit Information Form
@@ -202,67 +202,71 @@
{% comment %} Workbook 5: Additional UEIs (optional) {% endcomment %}
- Workbook 5: Additional UEIs (Complete)
- This workbook is only necessary if multiple the audit report covers UEIs. It is completed by the auditee. List the additional UEIs covered by the audit, excluding the primary UEI.
-
- Upload Workbook 5: Additional UEIs (optional)
-
- This workbook is only necessary if multiple the audit report covers UEIs. It is completed by the auditee. List the additional UEIs covered by the audit, excluding the primary UEI.
- Workbook 5: Additional UEIs (Complete)
+ This workbook is only necessary if the audit report covers multiple UEIs. It is completed by the auditee. List the additional UEIs covered by the audit, excluding the primary UEI.
+
+ Upload Workbook 5: Additional UEIs (optional)
+
+ This workbook is only necessary if the audit report covers multiple UEIs. It is completed by the auditee. List the additional UEIs covered by the audit, excluding the primary UEI.
+ Workbook 6: Secondary Auditors (Complete)
- This workbook is only necessary if multiple auditors did the audit work. In these cases, the secondary auditors should enter their information in this workbook.
-
- Upload Workbook 6: Secondary Auditors (optional)
-
- This workbook is only necessary if multiple auditors did the audit work. In these cases, the secondary auditors should enter their information in this workbook.
- Workbook 6: Secondary Auditors (Complete)
+ This workbook is only necessary if multiple auditors did the audit work. In these cases, the secondary auditors should enter their information in this workbook.
+
+ Upload Workbook 6: Secondary Auditors (optional)
+
+ This workbook is only necessary if multiple auditors did the audit work. In these cases, the secondary auditors should enter their information in this workbook.
+ Pre-submission validation (Complete) Before submitting your audit, use our validation tool to check your workbooks for errors.
Pre-submission validation
+ href="{% url 'audit:CrossValidation' report_id %}">Pre-submission validation
Before submitting your audit, use our validation tool to check your workbooks for errors. Auditor Certification (Complete)
Your certifying auditor must attest and sign the single audit package before submission. In order to unlock this step, you must complete and upload all required workbooks from the previous steps, as well as the single audit report PDF.
Auditor Certification
@@ -340,16 +344,16 @@
{% comment %} Auditee Certification {% endcomment %}
Auditee Certification (Complete)
Your certifying auditee must attest and sign the single audit package before submission. In order to unlock this step, you must complete and upload all required workbooks from the previous steps, as well as the single audit report PDF.
Auditee Certification
diff --git a/backend/audit/test_etl.py b/backend/audit/test_etl.py
index 8d6e5ed3ea..d6baeb2341 100644
--- a/backend/audit/test_etl.py
+++ b/backend/audit/test_etl.py
@@ -91,7 +91,6 @@ def _fake_federal_awards():
"award_reference": "ABC123",
"cluster": {"cluster_name": "N/A", "cluster_total": 0},
"program": {
- "is_major": "Y",
"program_name": "RETIRED AND SENIOR VOLUNTEER PROGRAM",
"amount_expended": 9000,
"audit_report_type": "U",
@@ -271,12 +270,23 @@ def test_load_general(self):
general = generals.first()
self.assertEqual(self.report_id, general.report_id)
+ def test_load_award_before_general_should_fail(self):
+ self.etl.load_federal_award()
+ federal_awards = FederalAward.objects.all()
+ self.assertEqual(len(federal_awards), 0)
+
def test_load_federal_award(self):
+ self.etl.load_general()
self.etl.load_federal_award()
federal_awards = FederalAward.objects.all()
self.assertEqual(len(federal_awards), 1)
federal_award = federal_awards.first()
self.assertEqual(self.report_id, federal_award.report_id)
+ general = General.objects.first()
+ self.assertEqual(
+ general.total_amount_expended,
+ self.sac.federal_awards["FederalAwards"].get("total_amount_expended"),
+ )
def test_load_findings(self):
self.etl.load_findings()
diff --git a/backend/audit/test_views.py b/backend/audit/test_views.py
index 86f814e052..073c3ada9f 100644
--- a/backend/audit/test_views.py
+++ b/backend/audit/test_views.py
@@ -1,4 +1,8 @@
import json
+from pathlib import Path
+from tempfile import NamedTemporaryFile
+from unittest.mock import patch
+
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
@@ -6,9 +10,6 @@
from django.test import Client
from model_bakery import baker
-from unittest.mock import patch
-
-from tempfile import NamedTemporaryFile
from openpyxl import load_workbook
from openpyxl.cell import Cell
@@ -30,8 +31,13 @@
NOTES_TO_SEFA_ENTRY_FIXTURES,
FORM_SECTIONS,
)
+from .fixtures.single_audit_checklist import (
+ fake_auditor_certification,
+ fake_auditee_certification,
+)
from .models import Access, SingleAuditChecklist
-from .views import MySubmissions
+from .views import MySubmissions, submission_progress_check
+from .cross_validation.sac_validation_shape import snake_to_camel
User = get_user_model()
@@ -59,6 +65,8 @@
"auditor_contacts": ["d@d.com"],
}
+AUDIT_JSON_FIXTURES = Path(__file__).parent / "fixtures" / "json"
+
# Mocking the user login and file scan functions
def _mock_login_and_scan(client, mock_scan_file):
@@ -94,6 +102,12 @@ def _make_user_and_sac(**kwargs):
return user, sac
+def _load_json(target):
+ """Given a str or Path, load JSON from that target."""
+ raw = Path(target).read_text(encoding="utf-8")
+ return json.loads(raw)
+
+
class MySubmissionsViewTests(TestCase):
def setUp(self):
self.user = baker.make(User)
@@ -188,11 +202,26 @@ def test_auditor_certification(self):
"""
Test that certifying auditor contacts can provide auditor certification
"""
+ data_step_1, data_step_2 = fake_auditor_certification()
+
user, sac = _make_user_and_sac(submission_status="ready_for_certification")
baker.make(Access, sac=sac, user=user, role="certifying_auditor_contact")
kwargs = {"report_id": sac.report_id}
- _authed_post(self.client, user, "audit:AuditorCertification", kwargs=kwargs)
+ _authed_post(
+ self.client,
+ user,
+ "audit:AuditorCertification",
+ kwargs=kwargs,
+ data=data_step_1,
+ )
+ _authed_post(
+ self.client,
+ user,
+ "audit:AuditorCertificationConfirm",
+ kwargs=kwargs,
+ data=data_step_2,
+ )
updated_sac = SingleAuditChecklist.objects.get(report_id=sac.report_id)
@@ -202,11 +231,26 @@ def test_auditee_certification(self):
"""
Test that certifying auditee contacts can provide auditee certification
"""
+ data_step_1, data_step_2 = fake_auditee_certification()
+
user, sac = _make_user_and_sac(submission_status="auditor_certified")
baker.make(Access, sac=sac, user=user, role="certifying_auditee_contact")
kwargs = {"report_id": sac.report_id}
- _authed_post(self.client, user, "audit:AuditeeCertification", kwargs=kwargs)
+ _authed_post(
+ self.client,
+ user,
+ "audit:AuditeeCertification",
+ kwargs=kwargs,
+ data=data_step_1,
+ )
+ _authed_post(
+ self.client,
+ user,
+ "audit:AuditeeCertificationConfirm",
+ kwargs=kwargs,
+ data=data_step_2,
+ )
updated_sac = SingleAuditChecklist.objects.get(report_id=sac.report_id)
@@ -949,3 +993,101 @@ def test_valid_file_upload_for_notes_to_sefa(self, mock_scan_file):
notes_to_sefa_entries["note_title"],
test_data[0]["note_title"],
)
+
+
+class SubmissionProgressViewTests(TestCase):
+ """
+ The page shows information about a submission and conditionally displays links/other
+ affordances for individual sections.
+ """
+
+ def setUp(self):
+ self.user = baker.make(User)
+ self.sac = baker.make(SingleAuditChecklist)
+ self.client = Client()
+
+ def test_login_required(self):
+ """When an unauthenticated request is made, the view responds with 403."""
+
+ response = self.client.post(
+ reverse(
+ "audit:SubmissionProgress",
+ kwargs={"report_id": "12345"},
+ )
+ )
+
+ self.assertEqual(response.status_code, 403)
+
+ def test_phrase_in_page(self):
+ """Check that the phrase 'Single audit submission' appears in the page."""
+ baker.make(Access, user=self.user, sac=self.sac)
+ self.client.force_login(user=self.user)
+ phrase = "Single audit submission"
+ res = self.client.get(
+ reverse(
+ "audit:SubmissionProgress", kwargs={"report_id": self.sac.report_id}
+ )
+ )
+ self.assertIn(phrase, res.content.decode("utf-8"))
+
+ def test_submission_progress_check_geninfo_only(self):
+ """
+ Check the function containing the logic around which sections are required.
+
+ If the conditional questions all have negative answers and data is absent for
+ the rest, return the appropriate shape.
+ """
+ filename = "general-information--test0001test--simple-pass.json"
+ info = _load_json(AUDIT_JSON_FIXTURES / filename)
+ sac = baker.make(SingleAuditChecklist, general_information=info)
+ result = submission_progress_check(sac, None)
+ self.assertEqual(result["general_information"]["display"], "complete")
+ self.assertTrue(result["general_information"]["completed"])
+ conditional_keys = (
+ "additional_ueis",
+ "additional_eins",
+ "secondary_auditors",
+ )
+ for key in conditional_keys:
+ self.assertEqual(result[key]["display"], "hidden")
+ self.assertFalse(result["complete"])
+ baker.make(Access, user=self.user, sac=sac)
+ self.client.force_login(user=self.user)
+ res = self.client.get(
+ reverse("audit:SubmissionProgress", kwargs={"report_id": sac.report_id})
+ )
+ phrases = (
+ "Upload the Additional UEIs workbook",
+ "Upload the Additional EINs workbook",
+ "Upload the Secondary Auditors workbook",
+ )
+ for phrase in phrases:
+ self.assertNotIn(phrase, res.content.decode("utf-8"))
+
+ def test_submission_progress_check_simple_pass(self):
+ """
+ Check the function containing the logic around which sections are required.
+
+ If the conditional questions all have negative answers and data is present for
+ the rest, return the appropriate shape.
+
+
+ """
+ filename = "general-information--test0001test--simple-pass.json"
+ info = _load_json(AUDIT_JSON_FIXTURES / filename)
+ addl_sections = {}
+ for section_name, camel_name in snake_to_camel.items():
+ addl_sections[section_name] = {camel_name: "whatever"}
+ addl_sections["general_information"] = info
+ sac = baker.make(SingleAuditChecklist, **addl_sections)
+ result = submission_progress_check(sac, None)
+ self.assertEqual(result["general_information"]["display"], "complete")
+ self.assertTrue(result["general_information"]["completed"])
+ conditional_keys = (
+ "additional_ueis",
+ "additional_eins",
+ "secondary_auditors",
+ )
+ for key in conditional_keys:
+ self.assertEqual(result[key]["display"], "hidden")
+ self.assertTrue(result["complete"])
diff --git a/backend/audit/urls.py b/backend/audit/urls.py
index f0ffbcbdb1..f1b4c0c9a0 100644
--- a/backend/audit/urls.py
+++ b/backend/audit/urls.py
@@ -27,14 +27,24 @@ def camel_to_hyphen(raw: str) -> str:
),
path(
"auditor-certification/