fix: address test failures
mariajgrimaldi committed Mar 26, 2024
1 parent c91c520 commit 78549ab
Showing 30 changed files with 2,994 additions and 1,136 deletions.
48 changes: 32 additions & 16 deletions openassessment/assessment/api/peer.py
@@ -8,6 +8,7 @@

import logging

from django.conf import settings
from django.db import DatabaseError, IntegrityError, transaction
from django.utils import timezone

@@ -27,7 +28,7 @@
FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE = 30


class GradingStrategy:
class PeerGradingStrategy:
"""Grading strategies for peer assessments."""
MEAN = "mean"
MEDIAN = "median"
@@ -57,11 +58,15 @@ def flexible_peer_grading_active(submission_uuid, peer_requirements, course_sett
return days_elapsed >= FLEXIBLE_PEER_GRADING_REQUIRED_SUBMISSION_AGE_IN_DAYS


def get_peer_grading_strategy(peer_requirements):
def get_peer_grading_strategy(workflow_requirements):
"""
Get the peer grading strategy, either mean or median. Default is median.
"""
return peer_requirements.get("grading_strategy", GradingStrategy.MEDIAN)
if "peer" not in workflow_requirements:
return workflow_requirements.get("grading_strategy", PeerGradingStrategy.MEDIAN)
return workflow_requirements.get("peer", {}).get(
"grading_strategy", PeerGradingStrategy.MEDIAN,
)


def required_peer_grades(submission_uuid, peer_requirements, course_settings):
@@ -293,8 +298,11 @@ def get_score(submission_uuid, peer_requirements, course_settings):
scored_item.scored = True
scored_item.save()
assessments = [item.assessment for item in items]
grading_strategy = get_peer_grading_strategy(peer_requirements)
scores_dict = get_peer_assessment_scores(submission_uuid, grading_strategy)

scores_dict = get_assessment_scores_with_grading_strategy(
submission_uuid,
peer_requirements,
)
return {
"points_earned": sum(scores_dict.values()),
"points_possible": assessments[0].points_possible,
@@ -512,23 +520,21 @@ def get_rubric_max_scores(submission_uuid):
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message) from ex

def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
"""Get the median/mean score for each rubric criterion

For a given assessment, collect the median/mean score for each criterion on the
rubric. This set can be used to determine the overall score, as well as each
part of the individual rubric scores.
def get_assessment_scores_with_grading_strategy(submission_uuid, workflow_requirements):
"""Get the score for each rubric criterion calculated given grading strategy
obtained from the peer requirements dictionary. If no grading strategy is
provided in the peer requirements or the feature flag is not enabled, the
default median score calculation is used.
If there is a true median/mean score, it is returned. If there are two median/mean
values, the average of those two values is returned, rounded up to the
greatest integer value.
This function is based on get_assessment_median_scores, but allows the caller
to specify the grading strategy (mean, median) to use when calculating the score.
Args:
submission_uuid (str): The submission uuid is used to get the
assessments used to score this submission, and generate the
appropriate median/mean score.
grading_strategy (str): The grading strategy to use when calculating
the median/mean score. Default is "median".
workflow_requirements (dict): Requirements dictionary containing the
"grading_strategy" key, either at the top level or nested under "peer".
Returns:
dict: A dictionary of rubric criterion names,
@@ -538,12 +544,21 @@ def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
PeerAssessmentInternalError: If any error occurs while retrieving
information to form the median/mean scores, an error is raised.
"""
# If the feature flag is not enabled, use the median score calculation
# as the default behavior.
if not settings.FEATURES.get("ENABLE_ORA_PEER_CONFIGURABLE_GRADING", False):
return get_assessment_median_scores(submission_uuid)

current_grading_strategy = get_peer_grading_strategy(workflow_requirements)
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
items = workflow.graded_by.filter(scored=True)
assessments = [item.assessment for item in items]
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_score_dict(scores, grading_strategy=grading_strategy)
return Assessment.get_score_dict(
scores,
grading_strategy=current_grading_strategy,
)
except PeerWorkflow.DoesNotExist:
return {}
except DatabaseError as ex:
@@ -553,6 +568,7 @@ def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message) from ex


def get_assessment_median_scores(submission_uuid):
"""Get the median score for each rubric criterion
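For reference, a minimal sketch of how the renamed strategy lookup behaves; the requirement values are illustrative, and the assertions follow directly from the get_peer_grading_strategy implementation above:

from openassessment.assessment.api import peer as peer_api

# Nested workflow requirements, with the strategy under the "peer" step.
nested = {"peer": {"must_grade": 5, "grading_strategy": "mean"}}
assert peer_api.get_peer_grading_strategy(nested) == "mean"

# Flat peer requirements also work; without a "grading_strategy" key the
# helper falls back to PeerGradingStrategy.MEDIAN ("median").
assert peer_api.get_peer_grading_strategy({"must_grade": 5}) == "median"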
15 changes: 7 additions & 8 deletions openassessment/assessment/models/base.py
@@ -15,7 +15,6 @@

from collections import defaultdict
from copy import deepcopy
from django.conf import settings
from hashlib import sha1
import json
import logging
@@ -490,22 +489,20 @@ def create(cls, rubric, scorer_id, submission_uuid, score_type, feedback=None, s
return cls.objects.create(**assessment_params)

@classmethod
def get_score_dict(cls, scores_dict, score_type="median"):
"""Determine the score in a dictionary of lists of scores based on the score type
if the feature flag is enabled, otherwise use the median score calculation.
def get_score_dict(cls, scores_dict, grading_strategy):
"""Determine the score in a dictionary of lists of scores based on the
grading strategy calculation configuration.
Args:
scores_dict (dict): A dictionary of lists of int values. These int values
are reduced to a single value that represents the median.
score_type (str): The type of score to calculate. Defaults to "median".
grading_strategy (str): The grading strategy to use, either "mean" or "median".
Returns:
(dict): A dictionary with criterion name keys and the calculated score
values.
"""
if settings.FEATURES.get('ENABLE_ORA_PEER_CONFIGURABLE_GRADING'):
return getattr(cls, f"get_{score_type}_score_dict")(scores_dict)
return cls.get_median_score_dict(scores_dict)
return getattr(cls, f"get_{grading_strategy}_score_dict")(scores_dict)

@classmethod
def get_median_score_dict(cls, scores_dict):
@@ -621,6 +618,8 @@ def get_mean_score(scores):
"""
total_criterion_scores = len(scores)
if total_criterion_scores == 0:
return 0
return int(math.ceil(sum(scores) / float(total_criterion_scores)))

@classmethod
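As a rough illustration of the updated dispatch: get_score_dict resolves get_<strategy>_score_dict by name, and get_mean_score rounds the average up to the next integer. The values below are illustrative, and a get_mean_score_dict counterpart to get_median_score_dict is assumed to exist on the model (the new tests imply it does):

from openassessment.assessment.models import Assessment

scores = {"ideas": [5, 6], "clarity": [0, 1, 2, 4, 9]}

# "median" dispatches to get_median_score_dict, "mean" to get_mean_score_dict.
assert Assessment.get_score_dict(scores, grading_strategy="median") == {"ideas": 6, "clarity": 2}
# Mean rounds up: ceil(11 / 2) == 6 and ceil(16 / 5) == 4.
assert Assessment.get_score_dict(scores, grading_strategy="mean") == {"ideas": 6, "clarity": 4}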
139 changes: 137 additions & 2 deletions openassessment/assessment/test/test_peer.py
@@ -2,19 +2,21 @@

import copy
import datetime
from unittest.mock import patch
from unittest.mock import Mock, patch

from ddt import ddt, file_data, data, unpack
import pytz

from django.conf import settings
from django.db import DatabaseError, IntegrityError
from django.utils import timezone
from django.test.utils import override_settings
from freezegun import freeze_time
from pytest import raises

from submissions import api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors.peer import PeerAssessmentWorkflowError
from openassessment.assessment.errors.peer import PeerAssessmentInternalError, PeerAssessmentWorkflowError
from openassessment.assessment.models import (
Assessment,
AssessmentFeedback,
@@ -167,6 +169,11 @@
'force_on_flexible_peer_openassessments': True
}

FEATURES_WITH_GRADING_STRATEGY_ON = settings.FEATURES.copy()
FEATURES_WITH_GRADING_STRATEGY_OFF = settings.FEATURES.copy()
FEATURES_WITH_GRADING_STRATEGY_ON['ENABLE_ORA_PEER_CONFIGURABLE_GRADING'] = True
FEATURES_WITH_GRADING_STRATEGY_OFF['ENABLE_ORA_PEER_CONFIGURABLE_GRADING'] = False


@ddt
class TestPeerApi(CacheResetTest):
@@ -2230,6 +2237,134 @@ def test_flexible_peer_grading_waiting_on_submitter(self):
self.assertTrue(peer_api.flexible_peer_grading_active(alice_sub['uuid'], requirements, COURSE_SETTINGS))
self._assert_num_scored_items(alice_sub, 4)

@override_settings(FEATURES=FEATURES_WITH_GRADING_STRATEGY_ON)
@patch("openassessment.assessment.api.peer.PeerWorkflow")
@patch("openassessment.assessment.api.peer.Assessment.scores_by_criterion")
@data("mean", "median")
def test_get_peer_score_with_grading_strategy_enabled(
self,
grading_strategy,
scores_by_criterion_mock,
_
):
"""Test get score when grading strategy feature is on.
When grading strategy is enabled, the score is calculated using the
mean or median calculation method based on the grading strategy
configured for the peers step requirement.
"""
workflow_requirements = {
"peer": {
"must_grade": 5,
"must_be_graded_by": 4,
"enable_flexible_grading": False,
"grading_strategy": grading_strategy,
},
}

submission_uuid = "test_submission_uuid"
scores_by_criterion = {
"criterion_1": [6, 8, 15, 29, 37],
"criterion_2": [0, 1, 2, 4, 9]
}
scores_by_criterion_mock.return_value = scores_by_criterion

score_dict = peer_api.get_assessment_scores_with_grading_strategy(submission_uuid, workflow_requirements)

if grading_strategy == "mean":
self.assertDictEqual(score_dict, {"criterion_1": 19, "criterion_2": 4})

if grading_strategy == "median":
self.assertDictEqual(score_dict, {"criterion_1": 15, "criterion_2": 2})

@override_settings(FEATURES=FEATURES_WITH_GRADING_STRATEGY_ON)
@patch("openassessment.assessment.api.peer.PeerWorkflow.objects.get")
@patch("openassessment.assessment.api.peer.Assessment.scores_by_criterion")
@data("mean", "median")
def test_get_peer_score_with_grading_strategy_raise_errors(
self,
grading_strategy,
scores_by_criterion_mock,
peer_workflow_mock
):
"""Test get score when grading strategy feature is on and the
application flow raises errors.
When grading strategy is enabled, the score is calculated using the
mean or median calculation method based on the grading strategy
configured for the peers step requirement.
"""
workflow_requirements = {
"peer": {
"must_grade": 5,
"must_be_graded_by": 4,
"enable_flexible_grading": False,
"grading_strategy": grading_strategy,
},
}

submission_uuid = "test_submission_uuid"
scores_by_criterion = {
"criterion_1": [6, 8, 15, 29, 37],
"criterion_2": [0, 1, 2, 4, 9]
}
scores_by_criterion_mock.return_value = scores_by_criterion
peer_workflow_mock.side_effect = PeerWorkflow.DoesNotExist

self.assertDictEqual(
{},
peer_api.get_assessment_scores_with_grading_strategy(
submission_uuid,
workflow_requirements
)
)

workflow = Mock()
peer_workflow_mock.side_effect = workflow
workflow.return_value.graded_by.filter.return_value = []
scores_by_criterion_mock.side_effect = DatabaseError

with self.assertRaises(PeerAssessmentInternalError):
peer_api.get_assessment_scores_with_grading_strategy(
submission_uuid,
workflow_requirements
)

@override_settings(FEATURES=FEATURES_WITH_GRADING_STRATEGY_OFF)
@patch("openassessment.assessment.api.peer.Assessment")
@patch("openassessment.assessment.api.peer.get_assessment_median_scores")
def test_get_peer_score_with_grading_strategy_disabled(
self,
get_median_scores_mock,
assessment_mock
):
"""Test get score when grading strategy feature is off.
When grading strategy is disabled, the score is calculated using the
median of the peer assessments calculation method.
"""
workflow_requirements = {
"must_grade": 5,
"must_be_graded_by": 4,
"enable_flexible_grading": False,
"grading_strategy": "mean",
}
submission_uuid = "test_submission_uuid"
peer_api.get_assessment_scores_with_grading_strategy(submission_uuid, workflow_requirements)

get_median_scores_mock.assert_called_once_with(submission_uuid)
assessment_mock.get_score_dict.assert_not_called()

def test_mean_score_calculation(self):
"""Test mean score calculation for peer assessments."""
self.assertEqual(0, Assessment.get_mean_score([]))
self.assertEqual(5, Assessment.get_mean_score([5]))
self.assertEqual(6, Assessment.get_mean_score([5, 6]))
self.assertEqual(19, Assessment.get_mean_score([5, 6, 12, 16, 22, 53]))
self.assertEqual(19, Assessment.get_mean_score([6, 5, 12, 53, 16, 22]))
self.assertEqual(31, Assessment.get_mean_score([5, 6, 12, 16, 22, 53, 102]))
self.assertEqual(31, Assessment.get_mean_score([16, 6, 12, 102, 22, 53, 5]))


class PeerWorkflowTest(CacheResetTest):
"""
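For completeness, a hedged sketch of how a deployment might opt into the new behavior; the flag name comes from this commit, while the settings snippet itself is illustrative:

# Django settings override (illustrative), e.g. in an edx-platform settings file:
FEATURES["ENABLE_ORA_PEER_CONFIGURABLE_GRADING"] = True

# With the flag left off, get_assessment_scores_with_grading_strategy falls back
# to get_assessment_median_scores and ignores any configured grading strategy.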
Binary file modified openassessment/conf/locale/es_419/LC_MESSAGES/django.mo