diff --git a/openassessment/xblock/data_layer/data_layer_mixin.py b/openassessment/xblock/data_layer/data_layer_mixin.py new file mode 100644 index 0000000000..d4ad40920a --- /dev/null +++ b/openassessment/xblock/data_layer/data_layer_mixin.py @@ -0,0 +1,15 @@ +""" +Data layer for ORA + +XBlock handlers which surface info about an ORA, instead of being tied to views. +""" +from xblock.core import XBlock + +from openassessment.xblock.data_layer.serializers import OraBlockInfoSerializer + + +class DataLayerMixin: + @XBlock.json_handler + def get_block_info(self, data, suffix=""): # pylint: disable=unused-argument + block_info = OraBlockInfoSerializer(self) + return block_info.data diff --git a/openassessment/xblock/data_layer/serializers.py b/openassessment/xblock/data_layer/serializers.py new file mode 100644 index 0000000000..e42eec908b --- /dev/null +++ b/openassessment/xblock/data_layer/serializers.py @@ -0,0 +1,214 @@ +from rest_framework.serializers import ( + BooleanField, + DateTimeField, + IntegerField, + Serializer, + CharField, + ListField, + SerializerMethodField, +) + + +class CharListField(ListField): + child = CharField() + + +class IsRequiredField(BooleanField): + """ + Utility for checking if a field is "required" to reduce repeated code. 
+ """ + + def to_representation(self, value): + return value == "required" + + +class TextResponseConfigSerializer(Serializer): + enabled = SerializerMethodField() + required = IsRequiredField(source="text_response") + editorType = CharField(source="text_response_editor") + allowLatexPreview = BooleanField(source="allow_latex") + + def get_enabled(self, block): + return block.text_response is not None + + +class FileResponseConfigSerializer(Serializer): + enabled = SerializerMethodField() + required = IsRequiredField(source="file_upload_response") + fileUploadLimit = SerializerMethodField() + allowedExtensions = CharListField(source="get_allowed_file_types_or_preset") + blockedExtensions = CharListField(source="FILE_EXT_BLACK_LIST") + fileTypeDescription = CharField(source="file_upload_type") + + def get_enabled(self, block): + return block.file_upload_response is not None + + def get_fileUploadLimit(self, block): + if not block.allow_multiple_files: + return 1 + return block.MAX_FILES_COUNT + + +class TeamsConfigSerializer(Serializer): + enabled = BooleanField(source="is_team_assignment") + teamsetName = SerializerMethodField() + + def get_teamsetName(self, block): + if block.teamset_config is not None: + return block.teamset_config.name + + +class SubmissionConfigSerializer(Serializer): + startDatetime = DateTimeField(source="submission_start") + endDatetime = DateTimeField(source="submission_due") + + textResponseConfig = TextResponseConfigSerializer(source="*") + fileResponseConfig = FileResponseConfigSerializer(source="*") + + teamsConfig = TeamsConfigSerializer(source="*") + + +class RubricFeedbackConfigSerializer(Serializer): + description = CharField(source="rubric_feedback_prompt") # is this this field? 
+ defaultText = CharField(source="rubric_feedback_default_text") + + +class RubricCriterionOptionSerializer(Serializer): + name = CharField() + label = CharField() + points = IntegerField() + description = CharField(source="explanation") + + +class RubricCriterionSerializer(Serializer): + name = CharField(source="label") + description = CharField(source="prompt") + feedbackEnabled = SerializerMethodField() + feedbackRequired = IsRequiredField(source="feedback") + options = RubricCriterionOptionSerializer(many=True) + + @staticmethod + def _feedback(criterion): + # Feedback is disabled as a default + return criterion.get("feedback", "disabled") + + def get_feedbackEnabled(self, criterion): + # Feedback can be specified as optional or required + return self._feedback(criterion) != "disabled" + + +class RubricConfigSerializer(Serializer): + showDuringResponse = BooleanField(source="show_rubric_during_response") + feedbackConfig = RubricFeedbackConfigSerializer(source="*") + criteria = RubricCriterionSerializer( + many=True, source="rubric_criteria_with_labels" + ) + + +class SelfSettingsSerializer(Serializer): + required = BooleanField(default=True) + + startTime = DateTimeField(source="start") + endTime = DateTimeField(source="due") + + +class PeerSettingsSerializer(Serializer): + required = BooleanField(default=True) + + startTime = DateTimeField(source="start") + endTime = DateTimeField(source="due") + + minNumberToGrade = IntegerField(source="must_grade") + minNumberToBeGradedBy = IntegerField(source="must_be_graded_by") + + enableFlexibleGrading = BooleanField( + source="enable_flexible_grading", required=False + ) + + +class AssessmentStepSettingsSerializer(Serializer): + """ + Generic Assessments step, where we just need to know if the step is + required given the ora.rubric_assessments source.
+ """ + + required = BooleanField(default=True) + + def _get_step(self, rubric_assessments, step_name): + """Get the assessment step config for a given step_name""" + for step in rubric_assessments: + if step["name"] == step_name: + return step + return None + + def __init__(self, *args, **kwargs): + self.step_name = kwargs.pop("step_name") + return super().__init__(*args, **kwargs) + + def to_representation(self, rubric_assessments): + assessment_step = self._get_step(rubric_assessments, self.step_name) + + # Special handling for the peer step which includes extra fields + if assessment_step and self.step_name == "peer-assessment": + return PeerSettingsSerializer(assessment_step).data + elif assessment_step and self.step_name == "self-assessment": + return SelfSettingsSerializer(assessment_step).data + + # If we didn't find a step, it is not required + if assessment_step is None: + assessment_step = {"required": False} + + return super().to_representation(assessment_step) + + +class AssessmentStepsSettingsSerializer(Serializer): + training = AssessmentStepSettingsSerializer( + step_name="student-training", source="rubric_assessments" + ) + peer = AssessmentStepSettingsSerializer( + step_name="peer-assessment", source="rubric_assessments" + ) + # Workaround to allow reserved keyword in serializer key + vars()["self"] = AssessmentStepSettingsSerializer( + step_name="self-assessment", source="rubric_assessments" + ) + staff = AssessmentStepSettingsSerializer( + step_name="staff-assessment", source="rubric_assessments" + ) + + +class AssessmentStepsSerializer(Serializer): + order = SerializerMethodField() + settings = AssessmentStepsSettingsSerializer(source="*") + + def get_order(self, block): + return [step["name"] for step in block.rubric_assessments] + + +class LeaderboardConfigSerializer(Serializer): + enabled = SerializerMethodField() + numberOfEntries = IntegerField(source="leaderboard_show") + + def get_enabled(self, block): + return block.leaderboard_show > 0 
+ + +class OraBlockInfoSerializer(Serializer): + """ + Main serializer for statically-defined ORA Block information + """ + + title = CharField() + prompts = SerializerMethodField(source="*") + baseAssetUrl = SerializerMethodField(source="*") + + submissionConfig = SubmissionConfigSerializer(source="*") + assessmentSteps = AssessmentStepsSerializer(source="*") + rubricConfig = RubricConfigSerializer(source="*") + leaderboardConfig = LeaderboardConfigSerializer(source="*") + + def get_baseAssetUrl(self, block): + return block._get_base_url_path_for_course_assets(block.course.id) + + def get_prompts(self, block): + return [prompt["description"] for prompt in block.prompts] diff --git a/openassessment/xblock/data_layer/test_serializers.py b/openassessment/xblock/data_layer/test_serializers.py new file mode 100644 index 0000000000..6200f24027 --- /dev/null +++ b/openassessment/xblock/data_layer/test_serializers.py @@ -0,0 +1,445 @@ +""" +Tests for data layer of ORA XBlock +""" + +from unittest.mock import MagicMock + +import ddt + + +from openassessment.xblock.data_layer.serializers import ( + AssessmentStepsSerializer, + LeaderboardConfigSerializer, + RubricConfigSerializer, + SubmissionConfigSerializer, +) +from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario +from openassessment.xblock.test.test_team import ( + MockTeamsConfigurationService, + MockTeamsService, +) + + +class TestSubmissionConfigSerializer(XBlockHandlerTestCase): + """ + Test for SubmissionConfigSerializer + """ + + def _enable_team_ora(self, xblock): + """Utility function for mocking team dependencies on the passed xblock""" + xblock.is_team_assignment = MagicMock(return_value=True) + + xblock.teamset_config = MagicMock() + xblock.teamset_config.name = xblock.selected_teamset_id + + @scenario("data/submission_open.xml") + def test_dates(self, xblock): + # Given an individual (non-teams) ORA + xblock.teamset_config = MagicMock(return_value=None) + + # When I ask for the 
submission config + submission_config = SubmissionConfigSerializer(xblock).data + + # Then I get the expected values + expected_start = xblock.submission_start + expected_due = xblock.submission_due + self.assertEqual(submission_config["startDatetime"], expected_start) + self.assertEqual(submission_config["endDatetime"], expected_due) + + @scenario("data/basic_scenario.xml") + def test_dates_missing(self, xblock): + # Given an individual (non-teams) ORA + xblock.teamset_config = MagicMock(return_value=None) + + # When I ask for submission config + submission_config = SubmissionConfigSerializer(xblock).data + + # Then I get the expected values + self.assertIsNone(submission_config["startDatetime"]) + self.assertIsNone(submission_config["endDatetime"]) + + @scenario("data/basic_scenario.xml") + def test_text_response_config(self, xblock): + # Given an individual (non-teams) ORA with a text response + xblock.teamset_config = MagicMock(return_value=None) + + # When I ask for text response config + submission_config = SubmissionConfigSerializer(xblock).data + text_response_config = submission_config["textResponseConfig"] + + # Then I get the expected values + self.assertTrue(text_response_config["enabled"]) + self.assertTrue(text_response_config["required"]) + self.assertEqual(text_response_config["editorType"], "text") + self.assertFalse(text_response_config["allowLatexPreview"]) + + @scenario("data/basic_scenario.xml") + def test_html_response_config(self, xblock): + # Given an individual (non-teams) ORA with an html response + xblock.teamset_config = MagicMock(return_value=None) + xblock.text_response_editor = "html" + + # When I ask for text response config + submission_config = SubmissionConfigSerializer(xblock).data + text_response_config = submission_config["textResponseConfig"] + + # Then I get the expected values + self.assertEqual(text_response_config["editorType"], "html") + + @scenario("data/basic_scenario.xml") + def test_latex_preview(self, xblock): + # 
Given an individual (non-teams) ORA + xblock.teamset_config = MagicMock(return_value=None) + # ... with latex preview enabled + xblock.allow_latex = True + + # When I ask for text response config + submission_config = SubmissionConfigSerializer(xblock).data + text_response_config = submission_config["textResponseConfig"] + + # Then I get the expected values + self.assertTrue(text_response_config["allowLatexPreview"]) + + @scenario("data/file_upload_scenario.xml") + def test_file_response_config(self, xblock): + # Given an individual (non-teams) ORA with file upload enabled + xblock.teamset_config = MagicMock(return_value=None) + + # When I ask for file upload config + submission_config = SubmissionConfigSerializer(xblock).data + file_response_config = submission_config["fileResponseConfig"] + + # Then I get the expected values + self.assertTrue(file_response_config["enabled"]) + self.assertEqual( + file_response_config["fileUploadLimit"], xblock.MAX_FILES_COUNT + ) + self.assertEqual( + file_response_config["fileTypeDescription"], + xblock.file_upload_type, + ) + self.assertEqual( + file_response_config["allowedExtensions"], + xblock.get_allowed_file_types_or_preset(), + ) + self.assertEqual( + file_response_config["blockedExtensions"], xblock.FILE_EXT_BLACK_LIST + ) + + @scenario("data/team_submission.xml") + def test_team_ora_config(self, xblock): + # Given a team ORA + self._enable_team_ora(xblock) + + # When I ask for teams config + submission_config = SubmissionConfigSerializer(xblock).data + teams_config = submission_config["teamsConfig"] + + # Then I get the expected values + self.assertTrue(teams_config["enabled"]) + self.assertEqual(teams_config["teamsetName"], xblock.selected_teamset_id) + + +@ddt.ddt +class TestRubricConfigSerializer(XBlockHandlerTestCase): + """ + Test for RubricConfigSerializer + """ + + @ddt.data(True, False) + @scenario("data/basic_scenario.xml") + def test_show_during_response(self, xblock, mock_show_rubric): + # Given a basic setup 
where I do/not have rubric shown during response + xblock.show_rubric_during_response = mock_show_rubric + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the right values + self.assertEqual(rubric_config["showDuringResponse"], mock_show_rubric) + + @scenario("data/feedback_only_criterion_staff.xml") + def test_overall_feedback(self, xblock): + # Given an ORA block with one criterion + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the expected defaults + criteria = rubric_config["criteria"] + criterion = criteria[0] + self.assertEqual(len(criteria), 1) + self.assertEqual(criterion["name"], "vocabulary") + self.assertEqual( + criterion["description"], + "This criterion accepts only written feedback, so it has no options", + ) + + # ... In this example, feedback is required + self.assertTrue(criterion["feedbackEnabled"]) + self.assertTrue(criterion["feedbackRequired"]) + + @scenario("data/feedback_only_criterion_staff.xml") + def test_criterion(self, xblock): + # Given an ORA block with one criterion + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the expected defaults + criteria = rubric_config["criteria"] + criterion = criteria[0] + self.assertEqual(len(criteria), 1) + self.assertEqual(criterion["name"], "vocabulary") + self.assertEqual( + criterion["description"], + "This criterion accepts only written feedback, so it has no options", + ) + + # ... 
In this example, feedback is required + self.assertTrue(criterion["feedbackEnabled"]) + self.assertTrue(criterion["feedbackRequired"]) + + @scenario("data/feedback_only_criterion_self.xml") + def test_criterion_disabled_required(self, xblock): + # Given an ORA block with two criterion + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the expected defaults + criteria = rubric_config["criteria"] + + # .. the first criterion has feedback disabled + self.assertFalse(criteria[0]["feedbackEnabled"]) + self.assertFalse(criteria[0]["feedbackRequired"]) + + # .. the first criterion has feedback required + self.assertTrue(criteria[1]["feedbackEnabled"]) + self.assertTrue(criteria[1]["feedbackRequired"]) + + @scenario("data/file_upload_missing_scenario.xml") + def test_criterion_optional(self, xblock): + # Given an ORA block with one criterion, feedback optional + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the feedback enabled / required values + criteria = rubric_config["criteria"] + criterion = criteria[0] + self.assertTrue(criterion["feedbackEnabled"]) + self.assertFalse(criterion["feedbackRequired"]) + + @scenario("data/basic_scenario.xml") + def test_criteria(self, xblock): + # Given an ORA block with multiple criteria + expected_criteria = xblock.rubric_criteria + + # When I ask for rubric config + rubric_config = RubricConfigSerializer(xblock).data + + # Then I get the expected number of criteria + criteria = rubric_config["criteria"] + self.assertEqual(len(criteria), len(expected_criteria)) + + @scenario("data/basic_scenario.xml") + def test_feedback_config(self, xblock): + # Given an ORA block with feedback + xblock.rubric_feedback_prompt = "foo" + xblock.rubric_feedback_default_text = "bar" + + # When I ask for rubric config + feedback_config = RubricConfigSerializer(xblock).data["feedbackConfig"] + + # Then I get the expected defaults + 
self.assertEqual(feedback_config["description"], xblock.rubric_feedback_prompt) + self.assertEqual( + feedback_config["defaultText"], xblock.rubric_feedback_default_text + ) + + +class TestAssessmentStepsSerializer(XBlockHandlerTestCase): + """ + Test for AssessmentStepsSerializer + """ + + @scenario("data/basic_scenario.xml") + def test_order(self, xblock): + # Given a basic setup + expected_order = ["peer-assessment", "self-assessment"] + expected_step_keys = {"training", "peer", "self", "staff"} + + # When I ask for assessment step config + steps_config = AssessmentStepsSerializer(xblock).data + + # Then I get the right ordering and step keys + self.assertListEqual(steps_config["order"], expected_order) + steps = {step for step in steps_config["settings"].keys()} + self.assertSetEqual(steps, expected_step_keys) + + +class TestPeerSettingsSerializer(XBlockHandlerTestCase): + """Tests for PeerSettingsSerializer""" + + step_config_key = "peer" + + @scenario("data/basic_scenario.xml") + def test_peer_settings(self, xblock): + # Given a basic setup + expected_must_grade = 5 + expected_grade_by = 3 + + # When I ask for peer step config + peer_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertEqual(peer_config["minNumberToGrade"], expected_must_grade) + self.assertEqual(peer_config["minNumberToBeGradedBy"], expected_grade_by) + + @scenario("data/dates_scenario.xml") + def test_peer_dates(self, xblock): + # Given a basic setup + expected_start = "2015-01-02T00:00:00" + expected_due = "2015-04-01T00:00:00" + + # When I ask for peer step config + peer_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right dates + self.assertEqual(peer_config["startTime"], expected_start) + self.assertEqual(peer_config["endTime"], expected_due) + + @scenario("data/peer_assessment_flex_grading_scenario.xml") + def test_flex_grading(self, xblock): + 
# Given a peer step with flex grading + + # When I ask for peer step config + peer_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right steps and ordering + self.assertTrue(peer_config["enableFlexibleGrading"]) + + +class TestTrainingSettingsSerializer(XBlockHandlerTestCase): + """ + Test for TrainingSettingsSerializer + """ + + step_config_key = "training" + + @scenario("data/student_training.xml") + def test_enabled(self, xblock): + # Given an ORA with a training step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertTrue(step_config["required"]) + + @scenario("data/basic_scenario.xml") + def test_disabled(self, xblock): + # Given an ORA without a training step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertFalse(step_config["required"]) + + +class TestSelfSettingsSerializer(XBlockHandlerTestCase): + """ + Test for SelfSettingsSerializer + """ + + step_config_key = "self" + + @scenario("data/self_assessment_scenario.xml") + def test_enabled(self, xblock): + # Given an ORA with a self assessment step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertTrue(step_config["required"]) + + @scenario("data/peer_only_scenario.xml") + def test_disabled(self, xblock): + # Given an ORA without a self assessment step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertFalse(step_config["required"]) + + +class TestStaffSettingsSerializer(XBlockHandlerTestCase): + """ + Test for StaffSettingsSerializer + """ + + step_config_key = 
"staff" + + @scenario("data/staff_grade_scenario.xml") + def test_enabled(self, xblock): + # Given an ORA with a staff assessment step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertTrue(step_config["required"]) + + @scenario("data/peer_only_scenario.xml") + def test_disabled(self, xblock): + # Given an ORA without a staff assessment step + # When I ask for step config + step_config = AssessmentStepsSerializer(xblock).data["settings"][ + self.step_config_key + ] + + # Then I get the right config + self.assertFalse(step_config["required"]) + + +class TestLeaderboardConfigSerializer(XBlockHandlerTestCase): + """ + Test for LeaderboardConfigSerializer + """ + + @scenario("data/leaderboard_show.xml") + def test_leaderboard(self, xblock): + # Given I have a leaderboard configured + number_to_show = xblock.leaderboard_show + + # When I ask for leaderboard config + leaderboard_config = LeaderboardConfigSerializer(xblock).data + + # Then I get the expected config + self.assertTrue(leaderboard_config["enabled"]) + self.assertEqual(leaderboard_config["numberOfEntries"], number_to_show) + + @scenario("data/basic_scenario.xml") + def test_no_leaderboard(self, xblock): + # Given I don't have a leaderboard configured + # When I ask for leaderboard config + leaderboard_config = LeaderboardConfigSerializer(xblock).data + + # Then I get the expected config + self.assertFalse(leaderboard_config["enabled"]) + self.assertEqual(leaderboard_config["numberOfEntries"], 0) diff --git a/openassessment/xblock/openassessmentblock.py b/openassessment/xblock/openassessmentblock.py index 17a399e597..30727c8673 100644 --- a/openassessment/xblock/openassessmentblock.py +++ b/openassessment/xblock/openassessmentblock.py @@ -26,6 +26,7 @@ from openassessment.workflow.errors import AssessmentWorkflowError from openassessment.xblock.course_items_listing_mixin import 
CourseItemsListingMixin from openassessment.xblock.data_conversion import create_prompts_list, create_rubric_dict, update_assessments_format +from openassessment.xblock.data_layer.data_layer_mixin import DataLayerMixin from openassessment.xblock.defaults import * # pylint: disable=wildcard-import, unused-wildcard-import from openassessment.xblock.grade_mixin import GradeMixin from openassessment.xblock.leaderboard_mixin import LeaderboardMixin @@ -106,26 +107,29 @@ def load(path): @XBlock.needs("user_state") @XBlock.needs("teams") @XBlock.needs("teams_configuration") -class OpenAssessmentBlock(MessageMixin, - SubmissionMixin, - PeerAssessmentMixin, - SelfAssessmentMixin, - StaffAssessmentMixin, - StudioMixin, - GradeMixin, - LeaderboardMixin, - StaffAreaMixin, - WorkflowMixin, - TeamWorkflowMixin, - StudentTrainingMixin, - LmsCompatibilityMixin, - CourseItemsListingMixin, - ConfigMixin, - TeamMixin, - OpenAssessmentTemplatesMixin, - RubricReuseMixin, - StaffGraderMixin, - XBlock): +class OpenAssessmentBlock( + MessageMixin, + SubmissionMixin, + PeerAssessmentMixin, + SelfAssessmentMixin, + StaffAssessmentMixin, + StudioMixin, + GradeMixin, + LeaderboardMixin, + StaffAreaMixin, + WorkflowMixin, + TeamWorkflowMixin, + StudentTrainingMixin, + LmsCompatibilityMixin, + CourseItemsListingMixin, + ConfigMixin, + TeamMixin, + OpenAssessmentTemplatesMixin, + RubricReuseMixin, + StaffGraderMixin, + DataLayerMixin, + XBlock, +): """Displays a prompt and provides an area where students can compose a response.""" VALID_ASSESSMENT_TYPES = [ diff --git a/openassessment/xblock/test/data/peer_assessment_flex_grading_scenario.xml b/openassessment/xblock/test/data/peer_assessment_flex_grading_scenario.xml new file mode 100644 index 0000000000..74a8fbe966 --- /dev/null +++ b/openassessment/xblock/test/data/peer_assessment_flex_grading_scenario.xml @@ -0,0 +1,49 @@ + + Open Assessment Test + + + Given the state of the world today, what do you think should be done to combat poverty? 
+ + + Given the state of the world today, what do you think should be done to combat pollution? + + + + + 𝓒𝓸𝓷𝓬𝓲𝓼𝓮 + How concise is it? + + + + + + Form + How well-formed is it? + + + + + + + + + + diff --git a/package-lock.json b/package-lock.json index b2a27283eb..9d7667e032 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "edx-ora2", - "version": "5.1.0", + "version": "5.2.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "edx-ora2", - "version": "4.5.0", + "version": "5.2.2", "dependencies": { "@edx/frontend-build": "^6.1.1", "@edx/paragon": "^20.9.2", diff --git a/package.json b/package.json index 3b55fa754f..3edddcf451 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "edx-ora2", - "version": "5.2.1", + "version": "5.2.2", "repository": "https://github.com/openedx/edx-ora2.git", "dependencies": { "@edx/frontend-build": "^6.1.1",