Merge pull request #1803 from GSA-TTS/1658-cross-validation-validate-number-of-findings-across-awards-and-audit-findings

1658 cross validation validate number of findings across awards and audit findings
sambodeme authored Aug 15, 2023
2 parents 35dcf52 + 388123f commit 5f90a24
Showing 5 changed files with 190 additions and 0 deletions.
2 changes: 2 additions & 0 deletions backend/audit/cross_validation/__init__.py
@@ -55,6 +55,7 @@
]
"""
from .number_of_findings import number_of_findings
from .additional_ueis import additional_ueis
from .auditee_ueis_match import auditee_ueis_match
from .audit_findings import audit_findings
@@ -66,6 +67,7 @@
    audit_findings,
    auditee_ueis_match,
    additional_ueis,
    number_of_findings,
    submission_progress_check,
    tribal_data_sharing_consent,
]
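
This diff does not show how the list above is consumed, so here is a minimal sketch of a driver that applies each cross-validation function to the shaped SAC dictionary; the run_cross_validations name and the validators parameter are illustrative, not part of the codebase:

def run_cross_validations(validators, sac_dict):
    # Each validator takes the shaped SAC dict and returns a list of
    # {"error": "..."} dicts; an empty combined list means the submission
    # passed every cross-validation check.
    errors = []
    for validate in validators:
        errors.extend(validate(sac_dict))
    return errors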
12 changes: 12 additions & 0 deletions backend/audit/cross_validation/errors.py
@@ -1,3 +1,8 @@
from audit.fixtures.excel import (
    SECTION_NAMES,
)


def err_additional_ueis_empty():
    return (
        "general_information.multiple_ueis_covered is checked, "
@@ -29,3 +34,10 @@ def err_missing_tribal_data_sharing_consent():
"As a tribal organization, you must complete the data "
"sharing consent statement before submitting your audit."
)


def err_number_of_findings_inconsistent(total_expected, total_counted, award_ref):
    return (
        f"You reported {total_expected} findings for award {award_ref} in the {SECTION_NAMES.FEDERAL_AWARDS} workbook, "
        f"but declared {total_counted} findings for the same award in the {SECTION_NAMES.FEDERAL_AWARDS_AUDIT_FINDINGS} workbook."
    )
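
For reference, given the SECTION_NAMES labels added in backend/audit/fixtures/excel.py below, a call with an illustrative award reference renders as:

>>> err_number_of_findings_inconsistent(2, 3, "AWARD-1234")
'You reported 2 findings for award AWARD-1234 in the Federal Awards workbook, but declared 3 findings for the same award in the Federal Awards Audit Findings workbook.'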
52 changes: 52 additions & 0 deletions backend/audit/cross_validation/number_of_findings.py
@@ -0,0 +1,52 @@
from .errors import (
    err_number_of_findings_inconsistent,
)
from collections import defaultdict


def number_of_findings(sac_dict, *_args, **_kwargs):
    """
    Checks that the number of findings mentioned in Federal Awards matches
    the number of findings referenced in Federal Awards Audit Findings.
    """

    all_sections = sac_dict.get("sf_sac_sections", {})
    federal_awards_section = all_sections.get("federal_awards") or {}
    federal_awards = federal_awards_section.get("federal_awards", [])
    findings_uniform_guidance_section = (
        all_sections.get("findings_uniform_guidance") or {}
    )
    findings_uniform_guidance = findings_uniform_guidance_section.get(
        "findings_uniform_guidance_entries", []
    )

    expected_award_refs_count = {}
    found_award_refs_count = defaultdict(int)
    errors = []

    # Record how many findings each award declares in the Federal Awards workbook.
    for award in federal_awards:
        award_reference = award.get("award_reference", None)
        if award_reference:
            expected_award_refs_count[award_reference] = award["program"][
                "number_of_audit_findings"
            ]

    # Count how many findings entries actually reference each of those awards.
    for finding in findings_uniform_guidance:
        award_ref = finding["program"]["award_reference"]
        if award_ref in expected_award_refs_count:
            found_award_refs_count[award_ref] += 1

    # Emit one error per award whose declared and counted totals disagree.
    for award_ref, expected in expected_award_refs_count.items():
        counted = found_award_refs_count[award_ref]
        if counted != expected:
            errors.append(
                {
                    "error": err_number_of_findings_inconsistent(
                        expected,
                        counted,
                        award_ref,
                    )
                }
            )

    return errors
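
A minimal sketch of the dictionary shape this validator walks, with illustrative values: one award declares 2 findings but only 1 findings entry references it, so exactly one error comes back.

sac_dict = {
    "sf_sac_sections": {
        "federal_awards": {
            "federal_awards": [
                {
                    "award_reference": "AWARD-0001",
                    "program": {"number_of_audit_findings": 2},
                }
            ]
        },
        "findings_uniform_guidance": {
            "findings_uniform_guidance_entries": [
                {"program": {"award_reference": "AWARD-0001"}}
            ]
        },
    }
}
assert len(number_of_findings(sac_dict)) == 1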
101 changes: 101 additions & 0 deletions backend/audit/cross_validation/test_number_of_findings.py
@@ -0,0 +1,101 @@
import random
from django.test import TestCase
from audit.models import SingleAuditChecklist
from .errors import err_number_of_findings_inconsistent
from .number_of_findings import number_of_findings
from .sac_validation_shape import sac_validation_shape
from model_bakery import baker


class NumberOfFindingsTests(TestCase):
    AWARD_MIN = 1000
    AWARD_MAX = 2000
    FINDINGS_MIN = 1
    FINDINGS_MAX = 5

    def _random(self, low, high):
        return random.randint(low, high)  # nosec

    def _award_reference(self):
        return f"AWARD-{self._random(self.AWARD_MIN, self.AWARD_MAX)}"

    def _make_federal_awards(self, findings_count) -> dict:
        number_of_awards = self._random(2, 4)
        return {
            "FederalAwards": {
                "federal_awards": [
                    {
                        "program": {"number_of_audit_findings": findings_count},
                        "award_reference": self._award_reference(),
                    }
                    for _ in range(number_of_awards)
                ]
            }
        }

    def _make_findings_uniform_guidance(self, awards, mismatch) -> dict:
        entries = []
        for award in awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            count = award["program"]["number_of_audit_findings"]
            for _ in range(count + mismatch):
                entries.append({"program": {"award_reference": award_reference}})

        findings = (
            {
                "auditee_uei": "AAA123456BBB",
                "findings_uniform_guidance_entries": entries,
            }
            if len(entries) > 0
            else {"auditee_uei": "AAA123456BBB"}
        )

        return {"FindingsUniformGuidance": findings}

    def _make_sac(self, findings_count, mismatch=0) -> SingleAuditChecklist:
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(findings_count)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            sac.federal_awards, mismatch
        )
        return sac

    def test_zero_findings_count_report(self):
        """Ensure no error is returned for a consistent zero findings count."""
        sac = self._make_sac(0)
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_findings_count_matches_across_workbooks(self):
        """Ensure no error is returned for a consistent findings count."""
        sac = self._make_sac(self._random(self.FINDINGS_MIN, self.FINDINGS_MAX))
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def _test_findings_count_mismatch(self, base_count, mismatch):
        sac = self._make_sac(base_count, mismatch)
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(
            len(errors), len(sac.federal_awards["FederalAwards"]["federal_awards"])
        )

        for award in sac.federal_awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            expected_error = err_number_of_findings_inconsistent(
                base_count, base_count + mismatch, award_reference
            )
            self.assertIn({"error": expected_error}, errors)

    def test_reported_findings_exceed_declared_count(self):
        """
        Expect errors when the number of findings in the Federal Awards Audit
        Findings workbook (a.k.a. the Findings Uniform Guidance workbook)
        exceeds the number declared in the Federal Awards workbook.
        """
        self._test_findings_count_mismatch(self._random(2, 4), self._random(1, 2))

    def test_declared_findings_exceed_reported_count(self):
        """
        Expect errors when the number of findings declared in the Federal Awards
        workbook exceeds the number reported in the Federal Awards Audit Findings
        workbook.
        """
        self._test_findings_count_mismatch(self._random(2, 4), self._random(-2, -1))
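
Assuming manage.py lives in backend/, as the file paths suggest, these tests should run under Django's stock test runner:

python manage.py test audit.cross_validation.test_number_of_findings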
23 changes: 23 additions & 0 deletions backend/audit/fixtures/excel.py
@@ -82,3 +82,26 @@
SECONDARY_AUDITORS="SecondaryAuditors",
NOTES_TO_SEFA="NotesToSefa",
)

# FIXME MSHD: We should consolidate SectionNames with the above FormSections
SectionNames = namedtuple(
"SectionNames",
(
"ADDITIONAL_UEIS",
"AUDIT_FINDINGS_TEXT",
"CORRECTIVE_ACTION_PLAN",
"FEDERAL_AWARDS",
"FEDERAL_AWARDS_AUDIT_FINDINGS",
"NOTES_TO_SEFA",
"SECONDARY_AUDITORS",
),
)
SECTION_NAMES = SectionNames(
ADDITIONAL_UEIS="Additional UEIs",
AUDIT_FINDINGS_TEXT="Audit Findings Text",
CORRECTIVE_ACTION_PLAN="Corrective Action Plan",
FEDERAL_AWARDS="Federal Awards",
FEDERAL_AWARDS_AUDIT_FINDINGS="Federal Awards Audit Findings",
NOTES_TO_SEFA="Notes to SEFA",
SECONDARY_AUDITORS="Secondary Auditors",
)
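
Because SECTION_NAMES is a namedtuple instance, the human-readable workbook labels used in error messages are plain attribute lookups, e.g.:

>>> SECTION_NAMES.FEDERAL_AWARDS_AUDIT_FINDINGS
'Federal Awards Audit Findings'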
