Support text exercises #73

Closed
Feuermagier opened this issue Jul 8, 2024 · 2 comments · Fixed by #120

@Feuermagier (Collaborator):

Support text exercises. This would be useful, e.g., for automatically correcting very simple text exercises.

@Luro02 (Collaborator) commented Jul 12, 2024:

For text exercises I have already written the code in Python, which might help when porting it.

Here is what I got:

# Standard-library imports used below; the remaining names (BaseEntity,
# TextSubmission, Participation, User, AssessmentType, ArtemisManager, loads)
# come from the rest of the client code.
import hashlib
from typing import AsyncGenerator, Literal, Optional, Union

class TextSubmissionResult(BaseEntity, total=False):
    id: int
    completionDate: str
    successful: bool
    score: float
    rated: bool
    hasFeedback: bool
    submission: TextSubmission
    participation: Participation
    assessor: Optional[User]
    assessmentType: AssessmentType
    hasComplaint: bool
    testCaseCount: int
    passedTestCaseCount: int
    codeIssueCount: int

class NextTextSubmission(BaseEntity, total=False):
    durationInMinutes: int
    empty: bool
    id: int
    participation: Participation
    submissionDate: str
    submissionExerciseType: str  # <- must be "text"
    submitted: bool
    text: str
    type: AssessmentType

class StartAssessmentResponse(BaseEntity, total=False):
    type: str
    id: int
    # initialization_state
    # exercise:
    # result:
    # submissions:
    # empty:
    # durationInMinutes:
    # student:
    # participationIdentifier:
    # participationName

TextBlockType = Literal["AUTOMATIC", "MANUAL"]

class TextBlock:
    submission_id: int
    text: str
    start_index: int
    end_index: int
    number_of_affected_submissions: int = 0
    type: TextBlockType = "AUTOMATIC"

    def __init__(
        self,
        submission_id: int,
        text: str,
        start_index: int,
        end_index: int,
        number_of_affected_submissions: int = 0,
        type: TextBlockType = "AUTOMATIC"
    ) -> None:
        self.submission_id = submission_id
        self.text = text
        self.start_index = start_index
        self.end_index = end_index
        self.number_of_affected_submissions = number_of_affected_submissions
        self.type = type

    # https://github.com/ls1intum/Artemis/blob/e56f7375d711c0fa0e791980b32ea9bd775162ad/src/main/webapp/app/entities/text-block.model.ts#L21
    def compute_id(self) -> str:
        m = hashlib.sha1()
        m.update(f"{self.submission_id};{self.start_index}-{self.end_index};{self.text}".encode("utf-8"))
        return m.hexdigest()

    def to_dict(self) -> dict[str, Union[str, int]]:
        return {
            "id": self.compute_id(),
            "text": self.text,
            "startIndex": self.start_index,
            "endIndex": self.end_index,
            "numberOfAffectedSubmissions": self.number_of_affected_submissions,
            "type": self.type,
        }
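
# Example of the id scheme (hypothetical values): compute_id hashes
# "<submission_id>;<start_index>-<end_index>;<text>" with SHA-1.
_example_block = TextBlock(submission_id=42, text="Hello", start_index=0, end_index=5)
assert len(_example_block.compute_id()) == 40  # a SHA-1 hex digest is always 40 chars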

class Feedback:
    credits: int
    # a unique id for the feedback (should be generated from TextBlock)
    reference: str
    # the text of the feedback
    detailText: str
    type: TextBlockType

    def __init__(self, credits: int, reference: str, detailText: str, type: TextBlockType = "MANUAL") -> None:
        self.credits = credits
        self.reference = reference
        self.detailText = detailText
        self.type = type
    
    def to_dict(self) -> dict[str, Union[str, int]]:
        return {
            "credits": self.credits,
            "reference": self.reference,
            "detailText": self.detailText,
            "type": self.type,
        }

class AssessmentManager(ArtemisManager):
    async def start(self, exercise_id: int, correction_round: int = 0) -> StartAssessmentResponse:
        """
        Locks the submission for assessment and gets some data from the submission.
        """

        params = {
            "correction-round": str(correction_round),
        }
        # resp = await self._session.get_api_endpoint(
        #     f"/participations/{participation_id}/submissions/{submission_id}/for-text-assessment",
        #     params=params
        # )
        resp = await self._session.get_api_endpoint(
            f"/exercises/{exercise_id}/text-submission-without-assessment?lock=true",
            params=params
        )
        jdict: StartAssessmentResponse = await resp.json(loads=loads)
        return jdict

    async def submit_assessment(self, participation_id: int, result_id: int, feedbacks: list[Feedback], text_blocks: list[TextBlock]):
        # result_id = 1436
        resp = await self._session.post_api_endpoint(
            f"/participations/{participation_id}/results/{result_id}/submit-text-assessment",
            json = {
                "feedbacks": [i.to_dict() for i in feedbacks],
                "textBlocks": [i.to_dict() for i in text_blocks]
            }
        )

    async def cancel(self, participation_id: int, submission_id: int):
        resp = await self._session.post_api_endpoint(
            f"/participations/{participation_id}/submissions/{submission_id}/cancel-assessment"
        )

        # no response body

class TextSubmissionManager(ArtemisManager):
    async def get_submissions(
        self,
        exercise_id: int,
        filter_submitted_only: bool = False,
        filter_assessed_by_tutor: bool = False,
        correction_round: int = 0,
    ) -> AsyncGenerator[TextSubmission, None]:
        # NOTE: this API is not accessible by a tutor
        params = {
            "submittedOnly": str(filter_submitted_only).lower(),
            "assessedByTutor": str(filter_assessed_by_tutor).lower(),
            "correction-round": str(correction_round),
        }
        resp = await self._session.get_api_endpoint(
            f"/exercises/{exercise_id}/text-submissions",
            params=params
        )
        jdict: list[TextSubmission] = await resp.json(loads=loads)
        for submission in jdict:
            yield submission

    # TODO: replace with ExerciseManager#get_results
    async def get_results(
        self,
        exercise_id: int,
        with_submissions: bool = False,
    ) -> AsyncGenerator[TextSubmissionResult, None]:
        # NOTE: this API only shows the results of already graded submissions!
        params = {
            "withSubmissions": str(with_submissions).lower(),
        }

        resp = await self._session.get_api_endpoint(
            f"/exercises/{exercise_id}/results",
            params=params
        )
        jdict: list[TextSubmissionResult] = await resp.json(loads=loads)
        for submission in jdict:
            yield submission

    async def get_next_submission(self, exercise_id: int, head: bool = True) -> NextTextSubmission:
        params = {
            "head": str(head).lower(),
        }

        resp = await self._session.get_api_endpoint(
            f"/exercises/{exercise_id}/text-submission-without-assessment?lock=true",
            params=params
        )

        jdict = await resp.json(loads=loads)
        return jdict

    async def lock_and_get_submission(
        self, submission_id: int, correction_round: int = 0
    ) -> TextSubmission:
        """
        Locks the submission for assessment and gets the submission.
        :param      submission_id:     The submission identifier
        :type       submission_id:     int
        :param      correction_round:  The correction round, defaults to 0
        :type       correction_round:  int
        """
        params = {"correction-round": str(correction_round)}
        resp = await self._session.get_api_endpoint(
            f"/text-submissions/{submission_id}/lock", params=params
        )
        jdict = await resp.json(loads=loads)
        return jdict

    async def start_assessment(
        self, submission_id: int, correction_round: int = 0
    ) -> None:
        # NOTE: unfinished in this draft; the endpoint path below is still a stub
        resp = await self._session.get_api_endpoint(
            f"/text-submissions/{submission_id}/"
        )

If I remember correctly, the endpoint for listing all text submissions (get_submissions) doesn't work for tutors.
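
Roughly, the pieces above would be combined like this when assessing a single submission (a rough sketch with made-up ids and feedback text, not tested against Artemis):

async def assess_one(assessments: AssessmentManager, exercise_id: int) -> None:
    # lock the next submission without an assessment for this exercise
    submission = await assessments.start(exercise_id)

    # mark the passage the feedback refers to
    block = TextBlock(
        submission_id=submission["id"],
        text="First sentence of the answer.",
        start_index=0,
        end_index=29,
    )
    feedback = Feedback(
        credits=1,
        reference=block.compute_id(),  # ties the feedback to the text block
        detailText="Good start, but the second part is missing.",
    )

    # participation_id and result_id would have to be taken from the locked
    # submission; plain placeholders here.
    await assessments.submit_assessment(
        participation_id=123,
        result_id=456,
        feedbacks=[feedback],
        text_blocks=[block],
    )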

Luro02 self-assigned this Jul 14, 2024
@Luro02 (Collaborator) commented Jul 14, 2024:

I will take care of this; it should be implemented before the start of next semester.

Luro02 added a commit that referenced this issue Sep 26, 2024
dfuchss added a commit that referenced this issue Sep 27, 2024
* add basic support text exercises #73

* simplify internal code

* apply spotless

* fix minor issue

* Fix issue for merge

* Add missing env

---------

Co-authored-by: Dominik Fuchß <dominik.fuchss@kit.edu>
github-project-automation bot moved this from "Planned in near Future" to "Done" in Artemis & Programming on Sep 27, 2024