refactor: change name from score type to grading strategy accordingly
mariajgrimaldi committed Mar 25, 2024
1 parent 22d8d95 commit 94a83e3
Showing 3 changed files with 18 additions and 16 deletions.
14 changes: 8 additions & 6 deletions openassessment/assessment/api/peer.py
@@ -43,9 +43,9 @@ def flexible_peer_grading_enabled(peer_requirements, course_settings):
    return peer_requirements.get("enable_flexible_grading")


-def get_peer_score_type(peer_requirements):
+def get_peer_grading_strategy(peer_requirements):
    """
-    Get the peer grading type, either mean or median.
+    Get the peer grading type, either mean or median. Default is median.
    """
    return peer_requirements.get("grading_strategy", GradingStrategy.MEDIAN)
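
The renamed helper reads the peer step's settings dict and falls back to the median default when no strategy is configured. A usage sketch with a hypothetical settings dict ("must_grade" and "must_be_graded_by" are illustrative keys; only "grading_strategy" is confirmed by this diff):

    # Hypothetical peer requirements dict passed into the peer API.
    peer_requirements = {"must_grade": 5, "must_be_graded_by": 3, "grading_strategy": "mean"}
    get_peer_grading_strategy(peer_requirements)  # -> "mean"
    get_peer_grading_strategy({})                 # -> GradingStrategy.MEDIAN (the default)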

@@ -273,8 +273,8 @@ def get_score(submission_uuid, peer_requirements, course_settings):
        scored_item.scored = True
        scored_item.save()
    assessments = [item.assessment for item in items]
-    score_type = get_peer_score_type(peer_requirements)
-    scores_dict = get_peer_assessment_scores(submission_uuid, score_type)
+    grading_strategy = get_peer_grading_strategy(peer_requirements)
+    scores_dict = get_peer_assessment_scores(submission_uuid, grading_strategy)
    return {
        "points_earned": sum(scores_dict.values()),
        "points_possible": assessments[0].points_possible,
@@ -492,7 +492,7 @@ def get_rubric_max_scores(submission_uuid):
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex


-def get_peer_assessment_scores(submission_uuid, score_type="median"):
+def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
    """Get the median/mean score for each rubric criterion

    For a given assessment, collect the median/mean score for each criterion on the
@@ -507,6 +507,8 @@ def get_peer_assessment_scores(submission_uuid, score_type="median"):
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median/mean score.
+        grading_strategy (str): The grading strategy to use when calculating
+            the median/mean score. Default is "median".

    Returns:
        dict: A dictionary of rubric criterion names,
@@ -521,7 +523,7 @@ def get_peer_assessment_scores(submission_uuid, score_type="median"):
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
-        return Assessment.get_score_dict(scores, score_type=score_type)
+        return Assessment.get_score_dict(scores, grading_strategy=grading_strategy)
    except PeerWorkflow.DoesNotExist:
        return {}
    except DatabaseError as ex:
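
The actual difference between the two strategies lives in Assessment.get_score_dict, whose body is not part of this diff. A minimal sketch of the per-criterion aggregation it implies, assuming plain statistics (the real implementation may round differently):

    from statistics import mean, median

    def get_score_dict_sketch(scores_by_criterion, grading_strategy="median"):
        # scores_by_criterion: criterion name -> list of peer-assigned points,
        # roughly the shape returned by Assessment.scores_by_criterion.
        reducer = mean if grading_strategy == "mean" else median
        return {name: reducer(points) for name, points in scores_by_criterion.items()}

    get_score_dict_sketch({"Ideas": [2, 3, 5]})          # {'Ideas': 3}
    get_score_dict_sketch({"Ideas": [2, 3, 5]}, "mean")  # {'Ideas': 3.333...}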
2 changes: 1 addition & 1 deletion (template file; path not shown)
@@ -63,7 +63,7 @@
                <option value="median" {% if assessments.peer_assessment.grading_strategy == 'median' %}selected="true"{% endif %}>{% trans "Median (default)" %}</option>
            </select>
        </div>
-        <p class="setting-help">{% trans "Select the preferred grading strategy." %}</p>
+        <p class="setting-help">{% trans "Select the preferred grading strategy for the peer assessment. By default, the median across all peer reviews is used to calculate the final grade. If you select the mean, the average of all peer reviews will be used." %}</p>
    </li>
{% endif %}
</ul>
18 changes: 9 additions & 9 deletions openassessment/xblock/grade_mixin.py
@@ -9,7 +9,7 @@
from xblock.core import XBlock

from django.utils.translation import gettext as _
-from openassessment.assessment.api.peer import get_peer_score_type
+from openassessment.assessment.api.peer import get_peer_grading_strategy

from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError

@@ -302,8 +302,8 @@ def has_feedback(assessments):
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "peer-assessment" in assessment_steps:
-            score_type = get_peer_score_type(self.workflow_requirements()["peer"])
-            median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
+            grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
+            median_scores = peer_api.get_peer_assessment_scores(submission_uuid, grading_strategy)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

@@ -369,10 +369,10 @@ def _get_assessment_part(title, feedback_title, part_criterion_name, assessment)
                criterion_name,
                staff_assessment
            )
-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
        if "peer-assessment" in assessment_steps:
            peer_assessment_part = {
-                'title': _(f'Peer {score_type.capitalize()} Grade'),
+                'title': _(f'Peer {grading_strategy.capitalize()} Grade'),
                'criterion': criterion,
                'option': self._peer_median_option(submission_uuid, criterion),
                'individual_assessments': [
@@ -427,8 +427,8 @@ def _peer_median_option(self, submission_uuid, criterion):
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api

-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
-        median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
+        median_scores = peer_api.get_peer_assessment_scores(submission_uuid, grading_strategy)
        median_score = median_scores.get(criterion['name'], None)
        median_score = -1 if median_score is None else median_score
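
Note the sentinel handling: a criterion with no aggregated peer score yet resolves to -1 rather than None, e.g. (invented criterion names):

    median_scores = {"Ideas": 3}                        # "Content" not yet scored
    median_score = median_scores.get("Content", None)   # None
    median_score = -1 if median_score is None else median_score  # -1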

@@ -654,11 +654,11 @@ def _get_score_explanation(self, workflow):
        complete = score is not None

        assessment_type = self._get_assessment_type(workflow)
-        score_type = get_peer_score_type(self.workflow_requirements()["peer"])
+        grading_strategy = get_peer_grading_strategy(self.workflow_requirements()["peer"])
        sentences = {
            "staff": _("The grade for this problem is determined by your Staff Grade."),
            "peer": _(
-                f"The grade for this problem is determined by the {score_type} score of "
+                f"The grade for this problem is determined by the {grading_strategy} score of "
                "your Peer Assessments."
            ),
            "self": _("The grade for this problem is determined by your Self Assessment.")
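
Because grading_strategy is the raw string "median" or "mean", both user-facing strings above follow mechanically from it; stripped of the Django translation wrapper:

    for grading_strategy in ("median", "mean"):
        title = f"Peer {grading_strategy.capitalize()} Grade"
        explanation = (
            f"The grade for this problem is determined by the {grading_strategy} score of "
            "your Peer Assessments."
        )
        # -> "Peer Median Grade" / "Peer Mean Grade", and the matching explanation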
