Skip to content

Commit

Permalink
fix: fix effective assessment type (#2107)
Browse files Browse the repository at this point in the history
* fix: return correct effective assessment type

Additionally, extended Grades API to return this value and added to ORA
Data Accessor.

* fix: handle score override in effective assessment

Add score_overridden to grades API and add logic to effective assessment
type checking.

* chore: update ORA to 6.0.1
  • Loading branch information
nsprenkle authored Nov 13, 2023
1 parent 92d7a92 commit 9bcad98
Show file tree
Hide file tree
Showing 7 changed files with 162 additions and 69 deletions.
2 changes: 1 addition & 1 deletion openassessment/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
Initialization Information for Open Assessment Module
"""

__version__ = '6.0.0'
__version__ = '6.0.1'
40 changes: 40 additions & 0 deletions openassessment/xblock/apis/grades_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,46 @@ def __init__(self, block):
def _get_submission_uuid(self):
    """Return the underlying block's ``submission_uuid`` attribute."""
    return self._block.submission_uuid

@property
def score_overridden(self):
    """
    Determine if the score was overridden by staff.

    Adapted from grade_mixin._get_assessment_type.

    Returns:
        bool: True if a workflow score exists and carries a
        "staff_defined" annotation (i.e. staff overrode the grade),
        False otherwise (including when no score exists yet).
    """
    workflow = self._block.get_workflow_info()
    # Use .get so a workflow dict without a 'score' key is treated the
    # same as an unscored workflow instead of raising KeyError.
    score = workflow.get('score')

    if not score:
        # No score yet -> nothing could have been overridden.
        return False

    # .get on the annotation guards against malformed annotation dicts.
    return any(
        annotation.get('annotation_type') == "staff_defined"
        for annotation in score.get("annotations", [])
    )

@property
def effective_assessment_type(self):
    """
    Determine which assessment step we will use as our "graded" step.

    Precedence:
    1) Staff (if assessment received / overridden)
    2) Peer (if assessment step configured)
    3) Self (if assessment step configured)

    NOTE: The logic in a few places differs, but this combines the best I've found.
    """
    has_staff_grade = self.staff_score is not None or self.score_overridden
    if has_staff_grade:
        return "staff"

    # Fall back to the highest-precedence configured step, if any.
    configured_steps = self._block.assessment_steps
    for step_name, assessment_type in (
        ("peer-assessment", "peer"),
        ("self-assessment", "self"),
    ):
        if step_name in configured_steps:
            return assessment_type

    return None

@property
def self_score(self):
"""
Expand Down
5 changes: 5 additions & 0 deletions openassessment/xblock/apis/ora_data_accessor.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""API Data wrapper for exposed APIs within ORA XBlock"""
from openassessment.xblock.apis.grades_api import GradesAPI
from openassessment.xblock.apis.ora_config_api import ORAConfigAPI
from openassessment.xblock.apis.submissions.submissions_api import SubmissionAPI
from openassessment.xblock.apis.workflow_api import WorkflowAPI
Expand Down Expand Up @@ -28,6 +29,10 @@ def submission_data(self):
def workflow_data(self):
return WorkflowAPI(self._block)

@property
def grades_data(self):
    """Return a ``GradesAPI`` wrapping this block's grade-related data."""
    return GradesAPI(self._block)

@property
def self_assessment_data(self):
return SelfAssessmentAPI(self._block)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,18 +114,12 @@ class AssessmentGradeSerializer(Serializer):
}
"""

effectiveAssessmentType = SerializerMethodField()
effectiveAssessmentType = CharField(source="grades_data.effective_assessment_type")
self = AssessmentStepSerializer(source="self_assessment_data.assessment")
staff = AssessmentStepSerializer(source="staff_assessment_data.assessment")
peer = PeerAssessmentsSerializer(source="peer_assessment_data")
peerUnweighted = UnweightedPeerAssessmentsSerializer(source="peer_assessment_data")

def get_effectiveAssessmentType(self, instance): # pylint: disable=unused-argument
"""
Get effective assessment type
"""
return self.context["step"]


class AssessmentResponseSerializer(Serializer):
"""
Expand Down
172 changes: 113 additions & 59 deletions openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
)
from openassessment.xblock.ui_mixins.mfe.assessment_serializers import (
AssessmentResponseSerializer,
AssessmentStepSerializer,
AssessmentGradeSerializer,
AssessmentScoreSerializer,
AssessmentDataSerializer,
Expand Down Expand Up @@ -198,51 +197,74 @@ def test_scored_unscored(self, xblock):
{}
)

context = {"response": submission, "step": "peer"}
context = {"response": submission, "step": "done"}

# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data

# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])
self.assertEqual("peer", data["effectiveAssessmentType"])
self.assertEqual(data["peer"]["stepScore"], {'earned': 5, 'possible': 6})
self.assertEqual(len(data["peer"]["assessments"]), 2)
self.assertIsNone(data["peerUnweighted"]["stepScore"])
self.assertEqual(len(data["peerUnweighted"]["assessments"]), 2)


class TestAssessmentGradeSerializer(XBlockHandlerTestCase, SubmitAssessmentsMixin):

maxDiff = None

def assertNestedDictEquals(self, dict_1, dict_2):
    """Assert deep dict equality after normalizing both sides via a JSON round-trip."""
    def normalize(d):
        # Round-tripping through JSON expands nested structures consistently.
        return json.loads(json.dumps(d))

    return self.assertDictEqual(normalize(dict_1), normalize(dict_2))

ASSESSMENT = {
'options_selected': {'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ', 'Form': 'Fair'},
'criterion_feedback': {},
'overall_feedback': ""
}

@scenario("data/self_assessment_scenario.xml", user_id="Alan")
@scenario("data/self_only_scenario.xml", user_id="Alan")
def test_self_assessment_step(self, xblock):
submission_text = ["Foo", "Bar"]

submission = self.create_test_submission(
xblock, submission_text=submission_text
)

context = {"response": submission, "step": "self"}
context = {"response": submission, "step": "done"}

# The self-only example uses a different rubric
self_assessment = {
'options_selected': {'Concise': 'Robert Heinlein', 'Clear-headed': 'Spock', 'Form': 'Reddit'},
'criterion_feedback': {},
'overall_feedback': "I'm so cool",
}

resp = self.request(
xblock, "self_assess", json.dumps(self.ASSESSMENT), response_format="json"
xblock, "self_assess", json.dumps(self_assessment), response_format="json"
)
self.assertTrue(resp["success"])

# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data
# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])
self.assertEqual(
data["self"],
AssessmentStepSerializer(
xblock.api_data.self_assessment_data.assessment, context=context
).data,
)

# Then I get the appropriate assessment data
expected_assessment_type = "self"
self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"])

score_details = data[expected_assessment_type]
self.assertDictEqual(score_details["assessment"], {
"overallFeedback": self_assessment['overall_feedback'],
"criteria": [
{"selectedOption": 2, "feedback": ""},
{"selectedOption": 4, "feedback": ""},
{"selectedOption": 2, "feedback": ""},
]
})
self.assertDictEqual(score_details["stepScore"], {"earned": 15, "possible": 20})

@scenario("data/grade_scenario.xml", user_id="Alan")
def test_staff_assessment_step(self, xblock):
Expand All @@ -253,21 +275,33 @@ def test_staff_assessment_step(self, xblock):

self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)

context = {"response": submission, "step": "staff"}
context = {"response": submission, "step": "done"}

# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data

# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])
self.assertEqual(
data["staff"],
AssessmentStepSerializer(
xblock.api_data.staff_assessment_data.assessment, context=context
).data,
)
# Then I get the appropriate assessment data
expected_assessment_type = "staff"
self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"])

score_details = data[expected_assessment_type]
self.assertNestedDictEquals(score_details["assessment"], {
"overallFeedback": STAFF_GOOD_ASSESSMENT["overall_feedback"],
"criteria": [
{
"selectedOption": 0,
"feedback": '',
},
{
"selectedOption": 1,
"feedback": '',
}
]
})
self.assertDictEqual(score_details["stepScore"], {"earned": 5, "possible": 6})

@scenario("data/grade_scenario.xml", user_id="Bernard")
def test_peer_assement_steps(self, xblock):
def test_peer_assessment_steps(self, xblock):
# Create a submission from the user
student_item = xblock.get_student_item_dict()
submission = self.create_test_submission(
Expand All @@ -292,60 +326,80 @@ def test_peer_assement_steps(self, xblock):
graded_by,
)

context = {"response": submission, "step": "peer"}
context = {"response": submission, "step": "done"}

# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data

# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])
self.assertEqual(data["peer"], {'stepScore': None, 'assessments': []})
# Then I get the appropriate assessment data
expected_assessment_type = "peer"
self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"])

score_details = data[expected_assessment_type]
self.assertDictEqual(score_details, {'stepScore': None, 'assessments': []})

self.assertIsNone(data["peerUnweighted"]['stepScore'])
self.assertEqual(len(data["peerUnweighted"]['assessments']), len(self.PEERS))

@scenario("data/grade_scenario.xml", user_id="Alan")
def test_assessment_step_score(self, xblock):
submission_text = ["Foo", "Bar"]
@scenario("data/grade_scenario.xml", user_id="Bernard")
def test_staff_override(self, xblock):
# Create a submission from the user
student_item = xblock.get_student_item_dict()
submission = self.create_test_submission(
xblock, submission_text=submission_text
xblock, student_item=student_item, submission_text=self.SUBMISSION
)

self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)

context = {"response": submission, "step": "staff"}
# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data

# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])

step_score = AssessmentScoreSerializer(
xblock.api_data.staff_assessment_data.assessment, context=context
).data

self.assertEqual(data["staff"]["stepScore"], step_score)

@scenario("data/grade_scenario.xml", user_id="Alan")
def test_assessment_step_assessment_data(self, xblock):
submission_text = ["Foo", "Bar"]
submission = self.create_test_submission(
xblock, submission_text=submission_text
# Create submissions from other users
scorer_subs = self.create_peer_submissions(
student_item, self.PEERS, self.SUBMISSION
)

graded_by = xblock.get_assessment_module("peer-assessment")["must_be_graded_by"]
for scorer_sub, scorer_name, assessment in list(
zip(scorer_subs, self.PEERS, PEER_ASSESSMENTS)
):
self.create_peer_assessment(
scorer_sub,
scorer_name,
submission,
assessment,
xblock.rubric_criteria,
graded_by,
)

# Create a staff override
self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT)

context = {"response": submission, "step": "staff"}
context = {"response": submission, "step": "done"}

# When I load my response
data = AssessmentGradeSerializer(xblock.api_data, context=context).data

# I get the appropriate response
self.assertEqual(context["step"], data["effectiveAssessmentType"])
# Then I get the appropriate assessment data
expected_assessment_type = "staff"
self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"])

assessment_data = AssessmentDataSerializer(
xblock.api_data.staff_assessment_data.assessment, context=context
).data
score_details = data[expected_assessment_type]
# Feedback is disabled in this assignment
self.assertNestedDictEquals(score_details["assessment"], {
"overallFeedback": STAFF_GOOD_ASSESSMENT["overall_feedback"],
"criteria": [
{
"selectedOption": 0,
"feedback": '',
},
{
"selectedOption": 1,
"feedback": '',
}
]
})
self.assertDictEqual(score_details["stepScore"], {"earned": 5, "possible": 6})

self.assertEqual(data["staff"]["assessment"], assessment_data)
# With peer responses all listed as unweighted
self.assertDictEqual(data["peer"], {'stepScore': None, 'assessments': []})
self.assertIsNone(data["peerUnweighted"]['stepScore'])
self.assertEqual(len(data["peerUnweighted"]['assessments']), len(self.PEERS))


class TestAssessmentScoreSerializer(TestCase):
Expand Down
2 changes: 1 addition & 1 deletion package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "edx-ora2",
"version": "6.0.0",
"version": "6.0.1",
"repository": "https://github.com/openedx/edx-ora2.git",
"dependencies": {
"@edx/frontend-build": "^6.1.1",
Expand Down

0 comments on commit 9bcad98

Please sign in to comment.