-
Notifications
You must be signed in to change notification settings - Fork 7
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #1803 from GSA-TTS/1658-cross-validation-validate-number-of-findings-across-awards-and-audit-findings
- Loading branch information
Showing
5 changed files
with
190 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
from .errors import ( | ||
err_number_of_findings_inconsistent, | ||
) | ||
from collections import defaultdict | ||
|
||
|
||
def number_of_findings(sac_dict, *_args, **_kwargs):
    """
    Checks that the number of findings mentioned in Federal Awards matches
    the number of findings referenced in Federal Awards Audit Findings.

    Returns a list of ``{"error": ...}`` dicts, one per award reference whose
    declared and reported findings counts disagree; empty list when consistent.
    """
    sections = sac_dict.get("sf_sac_sections", {})
    awards = (sections.get("federal_awards") or {}).get("federal_awards", [])
    findings_entries = (sections.get("findings_uniform_guidance") or {}).get(
        "findings_uniform_guidance_entries", []
    )

    # Declared findings count per award reference (Federal Awards workbook).
    # Awards without a truthy award_reference are skipped, as before.
    declared = {
        award["award_reference"]: award["program"]["number_of_audit_findings"]
        for award in awards
        if award.get("award_reference", None)
    }

    # Tally findings actually reported against each known award reference;
    # entries pointing at unknown references are deliberately ignored here.
    reported = defaultdict(int)
    for entry in findings_entries:
        ref = entry["program"]["award_reference"]
        if ref in declared:
            reported[ref] += 1

    return [
        {
            "error": err_number_of_findings_inconsistent(
                expected,
                reported[ref],
                ref,
            )
        }
        for ref, expected in declared.items()
        if reported[ref] != expected
    ]
101 changes: 101 additions & 0 deletions
101
backend/audit/cross_validation/test_number_of_findings.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,101 @@ | ||
import random | ||
from django.test import TestCase | ||
from audit.models import SingleAuditChecklist | ||
from .errors import err_number_of_findings_inconsistent | ||
from .number_of_findings import number_of_findings | ||
from .sac_validation_shape import sac_validation_shape | ||
from model_bakery import baker | ||
|
||
|
||
class NumberOfFindingsTests(TestCase):
    """Cross-validation tests for ``number_of_findings``.

    Verifies that the findings count declared per award in the Federal Awards
    workbook agrees with the number of entries in the Findings Uniform
    Guidance workbook, and that mismatches in either direction are reported.
    """

    AWARD_MIN = 1000
    AWARD_MAX = 2000
    FINDINGS_MIN = 1
    FINDINGS_MAX = 5

    def _random(self, low, high):
        """Return a random int in [low, high] (renamed from min/max to avoid
        shadowing the builtins)."""
        return random.randint(low, high)  # nosec

    def _award_reference(self):
        """Build a random award reference string, e.g. 'AWARD-1234'."""
        return f"AWARD-{self._random(self.AWARD_MIN, self.AWARD_MAX)}"

    def _make_federal_awards(self, findings_count) -> dict:
        """Build a FederalAwards section with 2-4 awards, each declaring
        ``findings_count`` audit findings."""
        number_of_award = self._random(2, 4)
        return {
            "FederalAwards": {
                "federal_awards": [
                    {
                        "program": {"number_of_audit_findings": findings_count},
                        "award_reference": self._award_reference(),
                    }
                    for _ in range(number_of_award)
                ]
            }
        }

    def _make_findings_uniform_guidance(self, awards, mismatch) -> dict:
        """Build a FindingsUniformGuidance section with one entry per declared
        finding, offset by ``mismatch`` (positive = extra entries, negative =
        missing entries)."""
        entries = []
        for award in awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            count = award["program"]["number_of_audit_findings"]
            for _ in range(count + mismatch):
                entries.append({"program": {"award_reference": award_reference}})

        findings = (
            {
                "auditee_uei": "AAA123456BBB",
                "findings_uniform_guidance_entries": entries,
            }
            if len(entries) > 0
            else {"auditee_uei": "AAA123456BBB"}
        )

        return {"FindingsUniformGuidance": findings}

    def _make_sac(self, findings_count, mismatch=0) -> SingleAuditChecklist:
        """Bake a SAC whose two workbook sections agree up to ``mismatch``."""
        sac = baker.make(SingleAuditChecklist)
        sac.federal_awards = self._make_federal_awards(findings_count)
        sac.findings_uniform_guidance = self._make_findings_uniform_guidance(
            sac.federal_awards, mismatch
        )
        return sac

    def test_zero_findings_count_report(self):
        """Ensure no error is returned for consistent zero findings."""
        sac = self._make_sac(0)
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def test_findings_count_matches_across_workbooks(self):
        """Ensure no error is returned for consistent findings count."""
        sac = self._make_sac(self._random(self.FINDINGS_MIN, self.FINDINGS_MAX))
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(errors, [])

    def _test_findings_count_mismatch(self, base_count, mismatch):
        """Assert one error per award, each reporting the expected/actual pair."""
        sac = self._make_sac(base_count, mismatch)
        errors = number_of_findings(sac_validation_shape(sac))
        self.assertEqual(
            len(errors), len(sac.federal_awards["FederalAwards"]["federal_awards"])
        )

        for award in sac.federal_awards["FederalAwards"]["federal_awards"]:
            award_reference = award["award_reference"]
            expected_error = err_number_of_findings_inconsistent(
                base_count, base_count + mismatch, award_reference
            )
            self.assertIn({"error": expected_error}, errors)

    def test_reported_findings_exceed_declared_count(self):
        """
        Expect errors when the number of findings in the Federal Awards Audit Findings workbook,
        a.k.a the Findings Uniform Guidance workbook, exceeds those declared in the Federal Awards workbook.
        """
        self._test_findings_count_mismatch(self._random(2, 4), self._random(1, 2))

    def test_declared_findings_exceed_reported_count(self):
        """
        Expect errors when the number of findings in the Federal Awards workbook
        exceeds those reported in the Federal Awards Audit Findings workbook.
        """
        self._test_findings_count_mismatch(self._random(2, 4), self._random(-2, -1))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters