From 94a83e34cde8d9d563d02a54ca5e3fdaa8e75876 Mon Sep 17 00:00:00 2001
From: Maria Grimaldi
Date: Mon, 25 Mar 2024 13:50:09 -0400
Subject: [PATCH] refactor: change name from score type to grading strategy
 accordingly

---
 openassessment/assessment/api/peer.py              | 14 ++++++++------
 .../legacy/edit/oa_edit_peer_assessment.html       |  2 +-
 openassessment/xblock/grade_mixin.py               | 18 +++++++++---------
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/openassessment/assessment/api/peer.py b/openassessment/assessment/api/peer.py
index bcfe4b0c48..91955c3ff9 100644
--- a/openassessment/assessment/api/peer.py
+++ b/openassessment/assessment/api/peer.py
@@ -43,9 +43,9 @@ def flexible_peer_grading_enabled(peer_requirements, course_settings):
     return peer_requirements.get("enable_flexible_grading")
 
 
-def get_peer_score_type(peer_requirements):
+def get_peer_grading_strategy(peer_requirements):
     """
-    Get the peer grading type, either mean or median.
+    Get the peer grading type, either mean or median. Default is median.
     """
     return peer_requirements.get("grading_strategy", GradingStrategy.MEDIAN)
 
@@ -273,8 +273,8 @@ def get_score(submission_uuid, peer_requirements, course_settings):
         scored_item.scored = True
         scored_item.save()
     assessments = [item.assessment for item in items]
-    score_type = get_peer_score_type(peer_requirements)
-    scores_dict = get_peer_assessment_scores(submission_uuid, score_type)
+    grading_strategy = get_peer_grading_strategy(peer_requirements)
+    scores_dict = get_peer_assessment_scores(submission_uuid, grading_strategy)
     return {
         "points_earned": sum(scores_dict.values()),
         "points_possible": assessments[0].points_possible,
@@ -492,7 +492,7 @@ def get_rubric_max_scores(submission_uuid):
         logger.exception(error_message)
         raise PeerAssessmentInternalError(error_message) from ex
 
-def get_peer_assessment_scores(submission_uuid, score_type="median"):
+def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
     """Get the median/mean score for each rubric criterion
 
     For a given assessment, collect the median/mean score for each criterion on the
@@ -507,6 +507,8 @@
         submission_uuid (str): The submission uuid is used to get the assessments
             used to score this submission, and generate the appropriate median/mean
             score.
+        grading_strategy (str): The grading strategy to use when calculating
+            the median/mean score. Default is "median".
 
     Returns:
         dict: A dictionary of rubric criterion names,
@@ -521,7 +523,7 @@
         items = workflow.graded_by.filter(scored=True)
         assessments = [item.assessment for item in items]
         scores = Assessment.scores_by_criterion(assessments)
-        return Assessment.get_score_dict(scores, score_type=score_type)
+        return Assessment.get_score_dict(scores, grading_strategy=grading_strategy)
     except PeerWorkflow.DoesNotExist:
         return {}
     except DatabaseError as ex:
diff --git a/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html b/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
index 9c17341598..7a10a45c81 100644
--- a/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
+++ b/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
@@ -63,7 +63,7 @@
-            {% trans "Select the preferred grading strategy." %}
+            {% trans "Select the preferred grading strategy for the peer assessment. By default, the median across all peer reviews is used to calculate the final grade. If you select the mean, the average of all peer reviews will be used." %}
         {% endif %}
diff --git a/openassessment/xblock/grade_mixin.py b/openassessment/xblock/grade_mixin.py
index db78bdfa11..689a7af3ed 100644
--- a/openassessment/xblock/grade_mixin.py
+++ b/openassessment/xblock/grade_mixin.py
@@ -9,7 +9,7 @@
 from xblock.core import XBlock
 
 from django.utils.translation import gettext as _
 
-from openassessment.assessment.api.peer import get_peer_score_type
+from openassessment.assessment.api.peer import get_peer_grading_strategy
 from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError
 
@@ -302,8 +302,8 @@ def has_feedback(assessments):
         if staff_assessment:
             median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
         elif "peer-assessment" in assessment_steps:
-            score_type = get_peer_score_type(self.workflow_requirements()["peer"])
-            median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
+            grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
+            median_scores = peer_api.get_peer_assessment_scores(submission_uuid, grading_strategy)
         elif "self-assessment" in assessment_steps:
             median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
 
@@ -369,10 +369,10 @@ def _get_assessment_part(title, feedback_title, part_criterion_name, assessment)
                 criterion_name,
                 staff_assessment
             )
-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
         if "peer-assessment" in assessment_steps:
             peer_assessment_part = {
-                'title': _(f'Peer {score_type.capitalize()} Grade'),
+                'title': _(f'Peer {grading_strategy.capitalize()} Grade'),
                 'criterion': criterion,
                 'option': self._peer_median_option(submission_uuid, criterion),
                 'individual_assessments': [
@@ -427,8 +427,8 @@ def _peer_median_option(self, submission_uuid, criterion):
         # Import is placed here to avoid model import at project startup.
         from openassessment.assessment.api import peer as peer_api
 
-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
-        median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
+        median_scores = peer_api.get_peer_assessment_scores(submission_uuid, grading_strategy)
         median_score = median_scores.get(criterion['name'], None)
         median_score = -1 if median_score is None else median_score
 
@@ -654,11 +654,11 @@ def _get_score_explanation(self, workflow):
         complete = score is not None
 
         assessment_type = self._get_assessment_type(workflow)
-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
         sentences = {
             "staff": _("The grade for this problem is determined by your Staff Grade."),
             "peer": _(
-                f"The grade for this problem is determined by the {score_type} score of "
+                f"The grade for this problem is determined by the {grading_strategy} score of "
                 "your Peer Assessments."
             ),
             "self": _("The grade for this problem is determined by your Self Assessment.")
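
For orientation, a minimal sketch of the behaviour the renamed helpers select between: the grading strategy read from the peer requirements decides whether per-criterion peer scores are collapsed with a median or a mean before get_score sums them into points_earned. This is illustrative only, not the Assessment.get_score_dict implementation; resolve_grading_strategy, aggregate_scores_by_criterion, the example requirements dict, and the ceiling rounding are assumptions of the sketch.

import math
import statistics


def resolve_grading_strategy(peer_requirements):
    # Mirrors get_peer_grading_strategy: fall back to "median" when the block
    # configuration does not set a strategy (the patch defaults to GradingStrategy.MEDIAN).
    return peer_requirements.get("grading_strategy", "median")


def aggregate_scores_by_criterion(scores_by_criterion, grading_strategy="median"):
    # Collapse each criterion's list of peer scores into one integer using the
    # configured strategy; rounding up is an assumption of this sketch.
    aggregate = statistics.median if grading_strategy == "median" else statistics.mean
    return {
        criterion: int(math.ceil(aggregate(scores)))
        for criterion, scores in scores_by_criterion.items()
    }


# Hypothetical peer requirements and three peer reviews of one submission.
peer_requirements = {"must_grade": 5, "must_be_graded_by": 3, "grading_strategy": "mean"}
peer_scores = {"Ideas": [3, 4, 5], "Content": [2, 2, 4]}

strategy = resolve_grading_strategy(peer_requirements)
print(aggregate_scores_by_criterion(peer_scores, strategy))   # {'Ideas': 4, 'Content': 3}
print(aggregate_scores_by_criterion(peer_scores, "median"))   # {'Ideas': 4, 'Content': 2}

Because get_score sums these per-criterion values into points_earned, the choice of strategy matters exactly where mean and median diverge, as they do for "Content" above.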