feat: implement mean score strategy
mariajgrimaldi committed Jan 26, 2024
1 parent ab631ae commit df6bf6a
Showing 28 changed files with 481 additions and 35 deletions.
60 changes: 55 additions & 5 deletions openassessment/assessment/api/peer.py
@@ -27,6 +27,12 @@
FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE = 30


class GradingStrategy:
"""Grading strategies for peer assessments."""
MEAN = "mean"
MEDIAN = "median"


def flexible_peer_grading_enabled(peer_requirements, course_settings):
"""
Is flexible peer grading turned on? Either at the course override
@@ -37,6 +43,13 @@ def flexible_peer_grading_enabled(peer_requirements, course_settings):
return peer_requirements.get("enable_flexible_grading")


def get_peer_score_type(peer_requirements):
"""
Get the grading strategy for peer assessments: either mean or median (defaults to median).
"""
return peer_requirements.get("grading_strategy", GradingStrategy.MEDIAN)


def required_peer_grades(submission_uuid, peer_requirements, course_settings):
"""
Given a submission id, find how many peer assessments are required.
@@ -260,11 +273,10 @@ def get_score(submission_uuid, peer_requirements, course_settings):
scored_item.scored = True
scored_item.save()
assessments = [item.assessment for item in items]

score_type = get_peer_score_type(peer_requirements)
scores_dict = get_peer_assessment_scores(submission_uuid, score_type)
return {
"points_earned": sum(
get_assessment_median_scores(submission_uuid).values()
),
"points_earned": sum(scores_dict.values()),
"points_possible": assessments[0].points_possible,
"contributing_assessments": [assessment.id for assessment in assessments],
"staff_id": None,
@@ -480,6 +492,44 @@ def get_rubric_max_scores(submission_uuid):
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message) from ex

def get_peer_assessment_scores(submission_uuid, score_type="median"):
    """Get the median or mean score for each rubric criterion

    For a given assessment, collect the median or mean score for each criterion
    on the rubric. This set can be used to determine the overall score, as well
    as each part of the individual rubric scores.

    For the median strategy, if there is a true median score, it is returned;
    if there are two median values, the average of those two values is
    returned, rounded up to the nearest integer. For the mean strategy, the
    mean is rounded up to the nearest integer.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate score.
        score_type (str): The grading strategy, either "median" or "mean".
            Defaults to "median".

    Returns:
        dict: A dictionary of rubric criterion names,
            with a median or mean score of the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the scores, an error is raised.
    """
try:
workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
items = workflow.graded_by.filter(scored=True)
assessments = [item.assessment for item in items]
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_score_dict(scores, score_type=score_type)
except PeerWorkflow.DoesNotExist:
return {}
except DatabaseError as ex:
error_message = (
"Error getting assessment median scores for submission {uuid}"
).format(uuid=submission_uuid)
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message) from ex

def get_assessment_median_scores(submission_uuid):
"""Get the median score for each rubric criterion
@@ -510,7 +560,7 @@ def get_assessment_median_scores(submission_uuid):
items = workflow.graded_by.filter(scored=True)
assessments = [item.assessment for item in items]
scores = Assessment.scores_by_criterion(assessments)
return Assessment.get_median_score_dict(scores)
return Assessment.get_score_dict(scores)
except PeerWorkflow.DoesNotExist:
return {}
except DatabaseError as ex:
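
For illustration, a minimal sketch of how the new grading_strategy requirement drives the peer score; the requirements dict, submission UUID, and criterion values below are hypothetical, while get_peer_score_type and get_peer_assessment_scores are the functions added above.

from openassessment.assessment.api import peer as peer_api

# Hypothetical peer-step requirements; "grading_strategy" is the new key and
# falls back to "median" when it is absent.
peer_requirements = {
    "must_grade": 5,
    "must_be_graded_by": 3,
    "grading_strategy": "mean",
}

submission_uuid = "11111111-2222-3333-4444-555555555555"  # hypothetical
score_type = peer_api.get_peer_score_type(peer_requirements)               # "mean"
scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
# e.g. {"Ideas": 4, "Content": 3} -- one aggregated value per rubric criterion
points_earned = sum(scores.values())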
2 changes: 1 addition & 1 deletion openassessment/assessment/api/self.py
@@ -288,7 +288,7 @@ def get_assessment_scores_by_criteria(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
# Since this is only being sent one score, the median score will be the
# same as the only score.
return Assessment.get_median_score_dict(scores)
return Assessment.get_score_dict(scores)
except DatabaseError as ex:
error_message = (
"Error getting self assessment scores for submission {}"
2 changes: 1 addition & 1 deletion openassessment/assessment/api/staff.py
@@ -225,7 +225,7 @@ def get_assessment_scores_by_criteria(submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
# Since this is only being sent one score, the median score will be the
# same as the only score.
return Assessment.get_median_score_dict(scores)
return Assessment.get_score_dict(scores)
except DatabaseError as ex:
error_message = f"Error getting staff assessment scores for {submission_uuid}"
logger.exception(error_message)
2 changes: 1 addition & 1 deletion openassessment/assessment/api/teams.py
@@ -231,7 +231,7 @@ def get_assessment_scores_by_criteria(team_submission_uuid):
scores = Assessment.scores_by_criterion(assessments)
# Since this is only being sent one score, the median score will be the
# same as the only score.
return Assessment.get_median_score_dict(scores)
return Assessment.get_score_dict(scores)
except DatabaseError as ex:
error_message = f"Error getting staff assessment scores for {team_submission_uuid}"
logger.exception(error_message)
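
Because the self, staff, and team APIs pass a single assessment per criterion, switching them to get_score_dict with the default score_type keeps their behavior unchanged: with one score, the median and the mean are the same value. A small sketch under that assumption, using hypothetical criterion names:

from openassessment.assessment.models import Assessment

# One score per criterion: median and mean coincide, so the default
# score_type="median" reproduces the previous get_median_score_dict result.
scores = {"Ideas": [4], "Content": [2]}
assert Assessment.get_score_dict(scores) == Assessment.get_score_dict(scores, score_type="mean")
# both return {"Ideas": 4, "Content": 2}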
57 changes: 57 additions & 0 deletions openassessment/assessment/models/base.py
@@ -15,6 +15,7 @@

from collections import defaultdict
from copy import deepcopy
from django.conf import settings
from hashlib import sha1
import json
import logging
@@ -488,6 +489,10 @@ def create(cls, rubric, scorer_id, submission_uuid, score_type, feedback=None, s

return cls.objects.create(**assessment_params)

@classmethod
def get_score_dict(cls, scores_dict, score_type="median"):
    """Return per-criterion scores using the given strategy, either "median" or "mean"."""
    return getattr(cls, f"get_{score_type}_score_dict")(scores_dict)

@classmethod
def get_median_score_dict(cls, scores_dict):
"""Determine the median score in a dictionary of lists of scores
@@ -518,6 +523,36 @@ def get_median_score_dict(cls, scores_dict):
median_scores[criterion] = criterion_score
return median_scores

@classmethod
def get_mean_score_dict(cls, scores_dict):
    """Determine the mean score for each criterion in a dictionary of lists of scores

    For a dictionary of lists, where each list contains a set of scores,
    determine the mean value of each list, rounded up to the nearest integer.

    Args:
        scores_dict (dict): A dictionary of lists of int values. These int
            values are reduced to a single value that represents the mean.

    Returns:
        (dict): A dictionary with criterion name keys and mean score
            values.

    Examples:
        >>> scores = {
        >>>     "foo": [1, 2, 3, 4, 5],
        >>>     "bar": [6, 7, 8, 9, 10]
        >>> }
        >>> Assessment.get_mean_score_dict(scores)
        {"foo": 3, "bar": 8}
    """
    mean_scores = {}
    for criterion, criterion_scores in scores_dict.items():
        mean_scores[criterion] = Assessment.get_mean_score(criterion_scores)
    return mean_scores

@staticmethod
def get_median_score(scores):
"""Determine the median score in a list of scores
@@ -552,6 +587,28 @@ def get_median_score(scores):
)
return median_score

@staticmethod
def get_mean_score(scores):
    """Determine the mean score in a list of scores

    The mean of the list is rounded up to the nearest integer.

    Args:
        scores (list): A list of int values. These int values
            are reduced to a single value that represents the mean.

    Returns:
        (int): The mean score, rounded up.

    Examples:
        >>> scores = [1, 2, 3, 4, 5]
        >>> Assessment.get_mean_score(scores)
        3
    """
    total_criterion_scores = len(scores)
    return int(math.ceil(sum(scores) / float(total_criterion_scores)))

@classmethod
def scores_by_criterion(cls, assessments):
"""Create a dictionary of lists for scores associated with criterion
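
To make the difference between the two strategies concrete, a standalone sketch of the arithmetic implemented above; the criterion names and score lists are made up.

import math

scores = {"Ideas": [2, 3, 5], "Content": [1, 4]}

# median strategy: {"Ideas": 3, "Content": 3}
#   ("Content" has two values, so their average 2.5 is rounded up to 3)
# mean strategy:   {"Ideas": 4, "Content": 3}
#   (10 / 3 = 3.33... is rounded up to 4; 5 / 2 = 2.5 is rounded up to 3)
mean_scores = {
    name: int(math.ceil(sum(values) / float(len(values))))
    for name, values in scores.items()
}
# Assessment.get_score_dict(scores, score_type="mean") dispatches to
# get_mean_score_dict via getattr and produces the same result.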
4 changes: 2 additions & 2 deletions openassessment/data.py
@@ -516,7 +516,7 @@ def _build_assessment_parts_array(cls, assessment, median_scores):
Args:
assessment - assessment containing the parts that we would like to report on.
median_scores - dictionary with criterion name keys and median score values,
as returned by Assessment.get_median_score_dict()
as returned by Assessment.get_score_dict()
Returns:
OrderedDict that contains an entry for each criterion of the assessment(s).
@@ -887,7 +887,7 @@ def generate_assessment_data(cls, xblock_id, submission_uuid=None):
)
if assessments:
scores = Assessment.scores_by_criterion(assessments)
median_scores = Assessment.get_median_score_dict(scores)
median_scores = Assessment.get_score_dict(scores)
else:
# If no assessments, just report submission data.
median_scores = []
@@ -54,6 +54,16 @@
</p>
{% endif %}
</li>
<li class="field comp-setting-entry">
<div class="wrapper-comp-setting">
<label for="peer_assessment_grading_strategy" class="setting-label">{% trans "Grading strategy for the peer assessment" %}</label>
<select id="peer_assessment_grading_strategy" class="input setting-input">
<option value="mean" {% if assessments.peer_assessment.grading_strategy == 'mean' %}selected="true"{% endif %}>{% trans "Mean" %}</option>
<option value="median" {% if assessments.peer_assessment.grading_strategy == 'median' %}selected="true"{% endif %}>{% trans "Median (default)" %}</option>
</select>
</div>
<p class="setting-help">{% trans "Select the preferred grading strategy." %}</p>
</li>
</ul>
</div>
</div>
14 changes: 9 additions & 5 deletions openassessment/xblock/grade_mixin.py
@@ -9,6 +9,7 @@
from xblock.core import XBlock

from django.utils.translation import gettext as _
from openassessment.assessment.api.peer import get_peer_score_type

from openassessment.assessment.errors import PeerAssessmentError, SelfAssessmentError

@@ -301,7 +302,8 @@ def has_feedback(assessments):
if staff_assessment:
median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
elif "peer-assessment" in assessment_steps:
median_scores = peer_api.get_assessment_median_scores(submission_uuid)
score_type = get_peer_score_type(self.workflow_requirements()["peer"])
median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
elif "self-assessment" in assessment_steps:
median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

@@ -367,9 +369,10 @@ def _get_assessment_part(title, feedback_title, part_criterion_name, assessment)
criterion_name,
staff_assessment
)
score_type = get_peer_score_type(self.workflow_requirements()["peer"])
if "peer-assessment" in assessment_steps:
peer_assessment_part = {
'title': _('Peer Median Grade'),
'title': _(f'Peer {score_type.capitalize()} Grade'),
'criterion': criterion,
'option': self._peer_median_option(submission_uuid, criterion),
'individual_assessments': [
@@ -424,7 +427,8 @@ def _peer_median_option(self, submission_uuid, criterion):
# Import is placed here to avoid model import at project startup.
from openassessment.assessment.api import peer as peer_api

median_scores = peer_api.get_assessment_median_scores(submission_uuid)
score_type = get_peer_score_type(self.workflow_requirements()["peer"])
median_scores = peer_api.get_peer_assessment_scores(submission_uuid, score_type)
median_score = median_scores.get(criterion['name'], None)
median_score = -1 if median_score is None else median_score

@@ -650,11 +654,11 @@ def _get_score_explanation(self, workflow):
complete = score is not None

assessment_type = self._get_assessment_type(workflow)

score_type = get_peer_score_type(self.workflow_requirements()["peer"])
sentences = {
"staff": _("The grade for this problem is determined by your Staff Grade."),
"peer": _(
"The grade for this problem is determined by the median score of "
f"The grade for this problem is determined by the {score_type} score of "
"your Peer Assessments."
),
"self": _("The grade for this problem is determined by your Self Assessment.")
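
As a final illustration, a short sketch of how the configured strategy changes the learner-facing strings built in grade_mixin.py; the requirements dict is hypothetical and the strings mirror the templates used above.

from openassessment.assessment.api.peer import get_peer_score_type

score_type = get_peer_score_type({"grading_strategy": "mean"})  # -> "mean"
title = f"Peer {score_type.capitalize()} Grade"                 # -> "Peer Mean Grade"
explanation = (
    f"The grade for this problem is determined by the {score_type} score of "
    "your Peer Assessments."
)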
35 changes: 16 additions & 19 deletions openassessment/xblock/static/dist/manifest.json
@@ -1,22 +1,19 @@
{
"base_url": "/static/dist",
"openassessment-editor-textarea.js": "/openassessment-editor-textarea.b8f866ba96a1d2ad92a4.js",
"openassessment-editor-textarea.js.map": "/openassessment-editor-textarea.b8f866ba96a1d2ad92a4.js.map",
"openassessment-editor-tinymce.js": "/openassessment-editor-tinymce.2cc0cab55c3be729265e.js",
"openassessment-editor-tinymce.js.map": "/openassessment-editor-tinymce.2cc0cab55c3be729265e.js.map",
"openassessment-lms.css": "/openassessment-lms.d876ac9af6fabe98df40.css",
"openassessment-lms.js": "/openassessment-lms.d876ac9af6fabe98df40.js",
"openassessment-lms.css.map": "/openassessment-lms.d876ac9af6fabe98df40.css.map",
"openassessment-lms.js.map": "/openassessment-lms.d876ac9af6fabe98df40.js.map",
"openassessment-ltr.css": "/openassessment-ltr.fd8409d820154aa22da8.css",
"openassessment-ltr.js": "/openassessment-ltr.fd8409d820154aa22da8.js",
"openassessment-ltr.css.map": "/openassessment-ltr.fd8409d820154aa22da8.css.map",
"openassessment-ltr.js.map": "/openassessment-ltr.fd8409d820154aa22da8.js.map",
"openassessment-rtl.css": "/openassessment-rtl.e984d9817bb8252276e7.css",
"openassessment-rtl.js": "/openassessment-rtl.e984d9817bb8252276e7.js",
"openassessment-rtl.css.map": "/openassessment-rtl.e984d9817bb8252276e7.css.map",
"openassessment-rtl.js.map": "/openassessment-rtl.e984d9817bb8252276e7.js.map",
"openassessment-studio.js": "/openassessment-studio.979e8b88dd0d9cee68f7.js",
"openassessment-studio.js.map": "/openassessment-studio.979e8b88dd0d9cee68f7.js.map",
"default-avatar.svg": "/95ec738c0b7faac5b5c9126794446bbd.svg"
"openassessment-lms.css": "/openassessment-lms.e0d673005e081371df0b.css",
"openassessment-lms.js": "/openassessment-lms.e0d673005e081371df0b.js",
"openassessment-studio.js": "/openassessment-studio.033bc67ad44c44a9870d.js",
"openassessment-rtl.css": "/openassessment-rtl.f130ffdc49b7bf41bd03.css",
"openassessment-rtl.js": "/openassessment-rtl.f130ffdc49b7bf41bd03.js",
"openassessment-ltr.css": "/openassessment-ltr.65e3c135076b8cfe5c16.css",
"openassessment-ltr.js": "/openassessment-ltr.65e3c135076b8cfe5c16.js",
"openassessment-editor-textarea.js": "/openassessment-editor-textarea.40ea511c1624ba301289.js",
"openassessment-editor-tinymce.js": "/openassessment-editor-tinymce.b145b829b7aea865e721.js",
"default-avatar.svg": "/95ec738c0b7faac5b5c9126794446bbd.svg",
"openassessment-lms.js.map": "/openassessment-lms.e0d673005e081371df0b.js.map",
"openassessment-studio.js.map": "/openassessment-studio.033bc67ad44c44a9870d.js.map",
"openassessment-rtl.css.map": "/openassessment-rtl.f130ffdc49b7bf41bd03.css.map",
"openassessment-ltr.css.map": "/openassessment-ltr.65e3c135076b8cfe5c16.css.map",
"openassessment-editor-textarea.js.map": "/openassessment-editor-textarea.40ea511c1624ba301289.js.map",
"openassessment-editor-tinymce.js.map": "/openassessment-editor-tinymce.b145b829b7aea865e721.js.map"
}

Some generated files are not rendered by default.
