From 85bf48735a82ff7550987d250d3ccd681fb96a55 Mon Sep 17 00:00:00 2001
From: Maria Grimaldi
Date: Tue, 26 Sep 2023 18:48:24 -0400
Subject: [PATCH 01/30] feat: implement mean score strategy
---
openassessment/assessment/api/peer.py | 60 ++++++++++++++--
openassessment/assessment/models/base.py | 71 +++++++++++++++++++
openassessment/data.py | 2 +-
.../legacy/edit/oa_edit_peer_assessment.html | 12 ++++
openassessment/xblock/config_mixin.py | 18 ++++-
openassessment/xblock/grade_mixin.py | 14 ++--
...penassessment-lms.7430e499fae20eeff7bd.js} | 0
...enassessment-ltr.5b291771f2af113d4918.css} | 0
...openassessment-ltr.65e3c135076b8cfe5c16.js | 0
...enassessment-rtl.731b1e1ea896e74cb5c0.css} | 0
...openassessment-rtl.f130ffdc49b7bf41bd03.js | 0
...assessment-studio.44a98dc6a1d4b7f295cd.js} | 0
.../js/src/studio/oa_edit_assessment.js | 18 +++++
openassessment/xblock/studio_mixin.py | 1 +
.../ui_mixins/mfe/ora_config_serializer.py | 1 +
openassessment/xblock/utils/defaults.py | 4 ++
openassessment/xblock/utils/schema.py | 2 +
openassessment/xblock/utils/xml.py | 7 ++
openassessment/xblock/workflow_mixin.py | 5 +-
settings/base.py | 8 ++-
20 files changed, 209 insertions(+), 14 deletions(-)
rename openassessment/xblock/static/dist/{openassessment-lms.dc8bb1e464bcaaab4668.js => openassessment-lms.7430e499fae20eeff7bd.js} (100%)
rename openassessment/xblock/static/dist/{openassessment-ltr.7955a1e2cc11fc6948de.css => openassessment-ltr.5b291771f2af113d4918.css} (100%)
create mode 100644 openassessment/xblock/static/dist/openassessment-ltr.65e3c135076b8cfe5c16.js
rename openassessment/xblock/static/dist/{openassessment-rtl.9de7c9bc7c1048c07707.css => openassessment-rtl.731b1e1ea896e74cb5c0.css} (100%)
create mode 100644 openassessment/xblock/static/dist/openassessment-rtl.f130ffdc49b7bf41bd03.js
rename openassessment/xblock/static/dist/{openassessment-studio.d576fb212cefa2e4b720.js => openassessment-studio.44a98dc6a1d4b7f295cd.js} (100%)
diff --git a/openassessment/assessment/api/peer.py b/openassessment/assessment/api/peer.py
index 467c1b5a8c..b5a99b170d 100644
--- a/openassessment/assessment/api/peer.py
+++ b/openassessment/assessment/api/peer.py
@@ -27,6 +27,12 @@
FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE = 30
+class GradingStrategy:
+ """Grading strategies for peer assessments."""
+ MEAN = "mean"
+ MEDIAN = "median"
+
+
def flexible_peer_grading_enabled(peer_requirements, course_settings):
"""
Is flexible peer grading turned on? Either at the course override
@@ -51,6 +57,13 @@ def flexible_peer_grading_active(submission_uuid, peer_requirements, course_sett
return days_elapsed >= FLEXIBLE_PEER_GRADING_REQUIRED_SUBMISSION_AGE_IN_DAYS
+def get_peer_grading_strategy(peer_requirements):
+    """
+    Get the peer grading strategy, either mean or median. Defaults to median.
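+
+    Example (illustrative peer_requirements value):
+        >>> get_peer_grading_strategy({"grading_strategy": "mean"})
+        'mean'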
+ """
+ return peer_requirements.get("grading_strategy", GradingStrategy.MEDIAN)
+
+
def required_peer_grades(submission_uuid, peer_requirements, course_settings):
"""
Given a submission uuid, find how many peer assessments are required.
@@ -280,11 +293,10 @@ def get_score(submission_uuid, peer_requirements, course_settings):
scored_item.scored = True
scored_item.save()
assessments = [item.assessment for item in items]
-
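+    # Aggregate the peer scores with the configured grading strategy
+    # (median by default, mean when selected and the feature flag is enabled).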
+    grading_strategy = get_peer_grading_strategy(peer_requirements)
+    scores_dict = get_peer_assessment_scores(submission_uuid, grading_strategy)
return {
- "points_earned": sum(
- get_assessment_median_scores(submission_uuid).values()
- ),
+ "points_earned": sum(scores_dict.values()),
"points_possible": assessments[0].points_possible,
"contributing_assessments": [assessment.id for assessment in assessments],
"staff_id": None,
@@ -500,6 +512,46 @@ def get_rubric_max_scores(submission_uuid):
logger.exception(error_message)
raise PeerAssessmentInternalError(error_message) from ex
+def get_peer_assessment_scores(submission_uuid, grading_strategy="median"):
+    """Get the median or mean score for each rubric criterion
+
+    For a given assessment, collect the median or mean score for each criterion
+    on the rubric. This set can be used to determine the overall score, as well
+    as each part of the individual rubric scores.
+
+    For the median strategy, if there is a single true median score, it is
+    returned; if there are two middle values, their average is returned,
+    rounded up to the nearest integer.
+
+    Args:
+        submission_uuid (str): The submission uuid is used to get the
+            assessments used to score this submission, and generate the
+            appropriate median or mean score.
+        grading_strategy (str): The grading strategy to use when calculating
+            the score, either "median" or "mean". Defaults to "median".
+
+    Returns:
+        dict: A dictionary of rubric criterion names, each mapped to the
+            median or mean score of the peer assessments.
+
+    Raises:
+        PeerAssessmentInternalError: If any error occurs while retrieving
+            information to form the median or mean scores, an error is raised.
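+
+    Example (illustrative; the uuid and criterion names are placeholders):
+        >>> get_peer_assessment_scores("example-uuid", grading_strategy="mean")
+        {"Ideas": 3, "Content": 4}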
+ """
+    try:
+        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
+        items = workflow.graded_by.filter(scored=True)
+        assessments = [item.assessment for item in items]
+        scores = Assessment.scores_by_criterion(assessments)
+        return Assessment.get_score_dict(scores, score_type=grading_strategy)
+    except PeerWorkflow.DoesNotExist:
+        return {}
+    except DatabaseError as ex:
+        error_message = (
+            "Error getting peer assessment scores for submission {uuid}"
+        ).format(uuid=submission_uuid)
+        logger.exception(error_message)
+        raise PeerAssessmentInternalError(error_message) from ex
+
+
def get_assessment_median_scores(submission_uuid):
"""Get the median score for each rubric criterion
diff --git a/openassessment/assessment/models/base.py b/openassessment/assessment/models/base.py
index 938356fe94..d969de8955 100644
--- a/openassessment/assessment/models/base.py
+++ b/openassessment/assessment/models/base.py
@@ -15,6 +15,7 @@
from collections import defaultdict
from copy import deepcopy
+from django.conf import settings
from hashlib import sha1
import json
import logging
@@ -488,6 +489,24 @@ def create(cls, rubric, scorer_id, submission_uuid, score_type, feedback=None, s
return cls.objects.create(**assessment_params)
+    @classmethod
+    def get_score_dict(cls, scores_dict, score_type="median"):
+        """Reduce a dictionary of lists of scores to a single score per criterion.
+
+        The calculation named by ``score_type`` is used if the feature flag is
+        enabled; otherwise the median score calculation is used.
+
+        Args:
+            scores_dict (dict): A dictionary of lists of int values. These int
+                values are reduced to a single value that represents the median
+                or mean.
+            score_type (str): The type of score to calculate, either "median"
+                or "mean". Defaults to "median".
+
+        Returns:
+            (dict): A dictionary with criterion name keys and median or mean
+                score values.
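+
+        Example (illustrative; assumes the ENABLE_ORA_PEER_CONFIGURABLE_GRADING
+        feature flag is enabled):
+            >>> scores = {"foo": [1, 2, 3, 4, 5], "bar": [6, 7, 8, 9, 10]}
+            >>> Assessment.get_score_dict(scores, score_type="mean")
+            {"foo": 3, "bar": 8}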
+ """
+ if settings.FEATURES.get('ENABLE_ORA_PEER_CONFIGURABLE_GRADING'):
+ return getattr(cls, f"get_{score_type}_score_dict")(scores_dict)
+ return cls.get_median_score_dict(scores_dict)
+
@classmethod
def get_median_score_dict(cls, scores_dict):
"""Determine the median score in a dictionary of lists of scores
@@ -518,6 +537,36 @@ def get_median_score_dict(cls, scores_dict):
median_scores[criterion] = criterion_score
return median_scores
+    @classmethod
+    def get_mean_score_dict(cls, scores_dict):
+        """Determine the mean score in a dictionary of lists of scores
+
+        For a dictionary of lists, where each list contains a set of scores,
+        determine the mean value in each list.
+
+        Args:
+            scores_dict (dict): A dictionary of lists of int values. These int
+                values are reduced to a single value that represents the mean.
+
+        Returns:
+            (dict): A dictionary with criterion name keys and mean score
+                values.
+
+        Examples:
+            >>> scores = {
+            ...     "foo": [1, 2, 3, 4, 5],
+            ...     "bar": [6, 7, 8, 9, 10]
+            ... }
+            >>> Assessment.get_mean_score_dict(scores)
+            {"foo": 3, "bar": 8}
+
+        """
+        mean_scores = {}
+        for criterion, criterion_scores in scores_dict.items():
+            criterion_score = Assessment.get_mean_score(criterion_scores)
+            mean_scores[criterion] = criterion_score
+        return mean_scores
+
@staticmethod
def get_median_score(scores):
"""Determine the median score in a list of scores
@@ -552,6 +601,28 @@ def get_median_score(scores):
)
return median_score
+    @staticmethod
+    def get_mean_score(scores):
+        """Determine the mean score in a list of scores
+
+        Compute the arithmetic mean of the list, rounded up to the
+        nearest integer.
+
+        Args:
+            scores (list): A list of int values. These int values
+                are reduced to a single value that represents the mean.
+
+        Returns:
+            (int): The mean score, rounded up.
+
+        Examples:
+            >>> scores = [1, 2, 3, 4, 5]
+            >>> Assessment.get_mean_score(scores)
+            3
+
+        """
+        total_criterion_scores = len(scores)
+        return int(math.ceil(sum(scores) / float(total_criterion_scores)))
+
@classmethod
def scores_by_criterion(cls, assessments):
"""Create a dictionary of lists for scores associated with criterion
diff --git a/openassessment/data.py b/openassessment/data.py
index 2f116f5467..7a7bd7830b 100644
--- a/openassessment/data.py
+++ b/openassessment/data.py
@@ -938,7 +938,7 @@ def generate_assessment_data(cls, xblock_id, submission_uuid=None):
)
if assessments:
scores = Assessment.scores_by_criterion(assessments)
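+            # get_score_dict uses the median calculation unless the
+            # ENABLE_ORA_PEER_CONFIGURABLE_GRADING feature flag is enabled.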
-            median_scores = Assessment.get_median_score_dict(scores)
+            median_scores = Assessment.get_score_dict(scores)
else:
# If no assessments, just report submission data.
median_scores = []
diff --git a/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html b/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
index d4f8c69532..7a10a45c81 100644
--- a/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
+++ b/openassessment/templates/legacy/edit/oa_edit_peer_assessment.html
@@ -54,6 +54,18 @@
{% endif %}
+    {% if enable_peer_configurable_grading %}
+    <li id="peer_assessment_grading_strategy_wrapper" class="field comp-setting-entry">
+        <div class="wrapper-comp-setting">
+            <label for="peer_assessment_grading_strategy" class="setting-label">{% trans "Grading Strategy" %}</label>
+            <select id="peer_assessment_grading_strategy" class="input setting-input">
+                <option value="median">{% trans "Median" %}</option>
+                <option value="mean">{% trans "Mean" %}</option>
+            </select>
+        </div>
+        <p class="setting-help">{% trans "Select the preferred grading strategy for the peer assessment. By default, the median across all peer reviews is used to calculate the final grade. If you select the mean, the average of all peer reviews will be used." %}</p>
+    </li>
+    {% endif %}