Merge remote-tracking branch 'origin/main' into jmm/1729
JeanMarie-PM committed Aug 12, 2023
2 parents 3ab9f33 + cfb4a29 commit 2dd32e2
Showing 77 changed files with 1,941 additions and 412 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/auto-merge-staging-pr.yml
@@ -0,0 +1,19 @@
---
name: Auto Merge Pull Request With Approved Label
on:
repository_dispatch:
types: [ ready-to-merge ]
jobs:
auto-merge:
name: Auto Merge The Created Pull Request
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- id: automerge
name: Auto Merge a PR with the correct labels
uses: pascalgn/[email protected]
env:
GITHUB_TOKEN: ${{ secrets.DEPLOY_TOKEN }}
MERGE_LABELS: "automerge,autogenerated"
MERGE_METHOD: "merge"
23 changes: 17 additions & 6 deletions .github/workflows/create-pull-request-to-staging.yml
@@ -5,7 +5,7 @@ on:
workflow_dispatch:

env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_TOKEN: ${{ secrets.DEPLOY_TOKEN }}

jobs:
check-commit:
@@ -52,13 +52,24 @@ jobs:
- name: Open Pull Request
id: open-pr
run: |
gh pr create --label "autogenerated" --title "${{ steps.date.outputs.date }} | MAIN --> PROD | DEV (${{ steps.git-short.outputs.short_sha }}) --> STAGING" --body "This is an auto-generated pull request to merge main into prod for a staging release on ${{ steps.date.outputs.date }} with the last commit being merged as ${{ steps.git-short.outputs.short_sha }}" --base prod --head main
gh pr create --label "autogenerated" --label "automerge" \
--title "${{ steps.date.outputs.date }} | MAIN --> PROD | DEV (${{ steps.git-short.outputs.short_sha }}) --> STAGING" \
--body "This is an auto-generated pull request to merge main into prod for a staging release on ${{ steps.date.outputs.date }} with the last commit being merged as ${{ steps.git-short.outputs.short_sha }}" \
--base prod --head main
- name: Get Pull Request Number
id: pr-number
run: echo "pull_request_number=$(gh pr view --json number -q .number || echo "")" >> $GITHUB_OUTPUT

- name: Comment on Pull Request
id: pr-comment
run: |
gh pr comment ${{ steps.pr-number.outputs.pull_request_number }} --body "Please close and re-open this pull request to ensure that a terraform plan is generated on the PR for the staging deployment after merging."
auto-approve:
name: Auto Approve The Created Pull Request
needs: [pull-request]
if: ${{ needs.pull-request.outputs.pr_number != null }}
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: hmarr/auto-approve-action@v3
with:
pull-request-number: ${{ needs.pull-request.outputs.pr_number }}
github-token: ${{ secrets.GITHUB_TOKEN }}
40 changes: 40 additions & 0 deletions .github/workflows/create-release.yml
@@ -0,0 +1,40 @@
---
name: Create Release
on:
# schedule:
# - cron: '0 18 * * 3'
workflow_dispatch: null

jobs:
release:
permissions: write-all
name: Create Release
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
ref: prod

- name: Get Current Date
id: date
run: |
echo "date=$(date +%Y%m%d)" >> $GITHUB_OUTPUT
- name: Changelog
uses: Bullrich/generate-release-changelog@master
id: changelog
env:
REPO: ${{ github.repository }}

- name: Create Release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v1.${{ steps.date.outputs.date }}
release_name: Release for ${{ steps.date.outputs.date }}
body: |
${{ steps.changelog.outputs.changelog }}
draft: false
prerelease: true
16 changes: 16 additions & 0 deletions .github/workflows/pull-request-checks.yml
@@ -86,3 +86,19 @@ jobs:
with:
environment: "production"
secrets: inherit

repo-event:
if: ${{ github.base_ref == 'prod' }}
name: Set Repository Event
permissions:
contents: write
runs-on: ubuntu-latest
needs: [terraform-plan-staging, testing-from-ghcr]
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
event-type: ready-to-merge
client-payload: '{"github": ${{ toJson(github) }}}'
if: github.event_name == 'pull_request'
4 changes: 4 additions & 0 deletions backend/api/views.py
@@ -4,6 +4,7 @@
from django.http import Http404, HttpResponse, JsonResponse
from django.urls import reverse
from django.views import View, generic
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from rest_framework import viewsets
from rest_framework.authentication import BaseAuthentication
@@ -23,6 +24,8 @@
UEISerializer,
)

UserModel = get_user_model()

AUDITEE_INFO_PREVIOUS_STEP_DATA_WE_NEED = [
"user_provided_organization_type",
"met_spending_threshold",
@@ -128,6 +131,7 @@ def access_and_submission_check(user, data):
role="certifying_auditor_contact",
email=serializer.data.get("certifying_auditor_contact"),
)

for contact in serializer.data.get("auditee_contacts"):
Access.objects.create(sac=sac, role="editor", email=contact)
for contact in serializer.data.get("auditor_contacts"):
4 changes: 3 additions & 1 deletion backend/audit/cross_validation/__init__.py
@@ -55,11 +55,13 @@
]
"""
from .auditee_ueis_match import auditee_ueis_match
from .additional_ueis import additional_ueis
from .auditee_ueis_match import auditee_ueis_match
from .audit_findings import audit_findings
from .sac_validation_shape import sac_validation_shape # noqa: F401

functions = [
audit_findings,
auditee_ueis_match,
additional_ueis,
]
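Each entry in the functions list above appears to follow one contract: take the shaped checklist dict and return a list of error dicts. A minimal sketch of a runner built on that assumption follows; the run_cross_validations helper, and the exact shape returned by sac_validation_shape, are illustrative only and not part of this commit.

from audit.cross_validation import functions, sac_validation_shape


def run_cross_validations(sac):
    """Apply every registered cross-validation function to one checklist."""
    # Assumption: sac_validation_shape(sac) yields {"sf_sac_sections": {...}, ...},
    # matching what audit_findings below reads.
    shaped = sac_validation_shape(sac)
    errors = []
    for func in functions:
        errors.extend(func(shaped))
    return errors
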
28 changes: 28 additions & 0 deletions backend/audit/cross_validation/audit_findings.py
@@ -0,0 +1,28 @@
def audit_findings(sac_dict):
    """
    Checks that the audit findings counted in the Federal Awards section are
    consistent with the Findings Text section.
    """
    all_sections = sac_dict["sf_sac_sections"]
    awards_outer = all_sections.get("federal_awards", {})
    awards = awards_outer.get("federal_awards", []) if awards_outer else []
    finds_outer = all_sections.get("findings_text", {})
    findings = finds_outer.get("findings_text_entries", []) if finds_outer else []
    # If both are empty, that's consistent:
    if not awards and not findings:
        return []
    if awards:
        # Total findings claimed across the awards' program sections:
        num_expected = sum(
            award.get("program", {}).get("number_of_audit_findings", 0)
            for award in awards
        )
        # For now, just check for non-zero:
        if num_expected and not findings:
            return [
                {
                    "error": "There are findings listed in Federal Awards "
                    "but none in Findings Text"
                }
            ]
    return []
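
For illustration only (not part of the diff), a hand-built dict shaped the way audit_findings reads it, with one reported finding and an empty Findings Text section, triggers the error:

from audit.cross_validation.audit_findings import audit_findings

sac_dict = {
    "sf_sac_sections": {
        "federal_awards": {
            "federal_awards": [{"program": {"number_of_audit_findings": 1}}]
        },
        "findings_text": {"findings_text_entries": []},
    }
}
# Expected: [{"error": "There are findings listed in Federal Awards but none in Findings Text"}]
print(audit_findings(sac_dict))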
4 changes: 4 additions & 0 deletions backend/audit/cross_validation/errors.py
@@ -20,6 +20,10 @@ def err_auditee_ueis_match():
return "Not all auditee UEIs matched."


def err_awards_findings_but_no_findings_text():
return "There are findings indicated in Federal Awards but" "none in Findings Text."


def err_missing_tribal_data_sharing_consent():
return (
"As a tribal organization, you must complete the data "
75 changes: 60 additions & 15 deletions backend/audit/etl.py
@@ -35,7 +35,7 @@ def load_all(self):
self.load_passthrough,
self.load_finding_texts,
self.load_captext,
# self.load_audit_info() # TODO: Uncomment when SingleAuditChecklist adds audit_information
self.load_audit_info,
)
for load_method in load_methods:
try:
@@ -47,6 +47,11 @@

def load_finding_texts(self):
findings_text = self.single_audit_checklist.findings_text

if not findings_text:
logger.warning("No finding texts found to load")
return

findings_text_entries = findings_text["FindingsText"]["findings_text_entries"]
for entry in findings_text_entries:
finding_text_ = FindingText(
@@ -61,6 +66,10 @@ def load_findings(self):
findings_uniform_guidance = (
self.single_audit_checklist.findings_uniform_guidance
)
if not findings_uniform_guidance:
logger.warning("No findings found to load")
return

findings_uniform_guidance_entries = findings_uniform_guidance[
"FindingsUniformGuidance"
]["findings_uniform_guidance_entries"]
@@ -86,6 +95,19 @@

def load_federal_award(self):
federal_awards = self.single_audit_checklist.federal_awards
report_id = self.single_audit_checklist.report_id
try:
general = General.objects.get(report_id=report_id)
except General.DoesNotExist:
logger.error(
f"General must be loaded before FederalAward. report_id = {report_id}"
)
return
general.total_amount_expended = federal_awards["FederalAwards"].get(
"total_amount_expended"
)
general.save()

for entry in federal_awards["FederalAwards"]["federal_awards"]:
program = entry["program"]
loan = entry["loan_or_loan_guarantee"]
@@ -113,17 +135,19 @@ def load_federal_award(self):
is_loan=loan["is_guaranteed"] == "Y",
loan_balance=loan["loan_balance_at_audit_period_end"],
is_direct=is_direct,
is_major=program["is_major"] == "Y",
mp_audit_report_type=program["audit_report_type"],
findings_count=program["number_of_audit_findings"],
is_passthrough_award=is_passthrough,
passthrough_amount=subrecipient_amount,
type_requirement=None, # TODO: What is this?
)
federal_award.save()

def load_captext(self):
corrective_action_plan = self.single_audit_checklist.corrective_action_plan
if not corrective_action_plan:
logger.warning("No corrective action plans found to load")
return

corrective_action_plan_entries = corrective_action_plan["CorrectiveActionPlan"][
"corrective_action_plan_entries"
]
@@ -138,6 +162,10 @@

def load_note(self):
        notes_to_sefa = self.single_audit_checklist.notes_to_sefa
        if not notes_to_sefa:
            logger.warning("No notes to sefa found to load")
            return
        notes_to_sefa = notes_to_sefa["NotesToSefa"]

accounting_policies = notes_to_sefa["accounting_policies"]
is_minimis_rate_used = notes_to_sefa["is_minimis_rate_used"] == "Y"
rate_explained = notes_to_sefa["rate_explained"]
@@ -190,6 +218,13 @@ def load_revision(self):
def load_passthrough(self):
federal_awards = self.single_audit_checklist.federal_awards
for entry in federal_awards["FederalAwards"]["federal_awards"]:
entities = (
entry["direct_or_indirect_award"]
and entry["direct_or_indirect_award"]["entities"]
)
if not entities:
logger.warning("No passthrough to load")
return
for entity in entry["direct_or_indirect_award"]["entities"]:
passthrough = Passthrough(
award_reference=entry["award_reference"],
@@ -274,9 +309,7 @@ def load_general(self):
entity_type=general_information["user_provided_organization_type"],
number_months=general_information["audit_period_other_months"],
audit_period_covered=general_information["audit_period_covered"],
is_report_required=None, # TODO: Notes say this hasn't been used since 2008.
total_fed_expenditures=None, # TODO: Where does this come from?
type_report_major_program=None, # TODO: Where does this come from?
total_amount_expended=None, # loaded from FederalAward
type_audit_code="UG",
is_public=self.single_audit_checklist.is_public,
data_source="G-FAC",
@@ -285,6 +318,9 @@

def load_secondary_auditor(self):
secondary_auditors = self.single_audit_checklist.secondary_auditors
if not secondary_auditors:
logger.warning("No secondary_auditors found to load")
return

for secondary_auditor in secondary_auditors["SecondaryAuditors"][
"secondary_auditors_entries"
@@ -306,17 +342,26 @@ def load_secondary_auditor(self):
sec_auditor.save()

def load_audit_info(self):
general = General.objects.get(report_id=self.single_audit_checklist.report_id)
report_id = self.single_audit_checklist.report_id
try:
general = General.objects.get(report_id=report_id)
except General.DoesNotExist:
logger.error(
f"General must be loaded before AuditInfo. report_id = {report_id}"
)
return
audit_information = self.single_audit_checklist.audit_information

if not audit_information:
logger.warning("No audit info found to load")
return
general.gaap_results = audit_information["gaap_results"]
"""
TODO:
Missing in schema
general.sp_framework = audit_information[]
general.is_sp_framework_required = audit_information[]
general.sp_framework_auditor_opinion = audit_information[]
"""
general.sp_framework = audit_information["sp_framework_basis"]
general.is_sp_framework_required = (
audit_information["is_sp_framework_required"] == "Y"
)
general.sp_framework_auditor_opinion = audit_information[
"sp_framework_opinions"
]
general.is_going_concern = audit_information["is_going_concern_included"] == "Y"
general.is_significant_deficiency = (
audit_information["is_internal_control_deficiency_disclosed"] == "Y"
@@ -0,0 +1,42 @@
{
"FederalAwards": {
"auditee_uei": "TEST0001TEST",
"total_amount_expended": 12345,
"federal_awards": [
{
"program": {
"federal_agency_prefix": "42",
"three_digit_extension": "RD",
"additional_award_identification": 1234,
"program_name": "FLOWER DREAMING",
"is_major": "N",
"audit_report_type": "",
"number_of_audit_findings": 0,
"amount_expended": 500,
"federal_program_total": 500
},
"loan_or_loan_guarantee": {
"is_guaranteed": "N",
"loan_balance_at_audit_period_end": 0
},
"direct_or_indirect_award": {
"is_direct": "N",
"entities": [
{
"passthrough_name": "Lothlórien",
"passthrough_identifying_number": "54321"
}
]
},
"cluster": {
"cluster_name": "N/A",
"cluster_total": 0
},
"subrecipients": {
"is_passed": "N"
},
"award_reference": "AWARD-0001"
}
]
}
}