diff --git a/openassessment/data.py b/openassessment/data.py index b1ddc7a25c..9768c3c492 100644 --- a/openassessment/data.py +++ b/openassessment/data.py @@ -19,6 +19,7 @@ from django.utils.translation import gettext as _ import requests +from submissions.models import Submission from submissions import api as sub_api from submissions.errors import SubmissionNotFoundError from openassessment.runtime_imports.classes import import_block_structure_transformers, import_external_id @@ -27,6 +28,7 @@ from openassessment.assessment.models import Assessment, AssessmentFeedback, AssessmentPart from openassessment.fileupload.api import get_download_url from openassessment.workflow.models import AssessmentWorkflow, TeamAssessmentWorkflow +from openassessment.assessment.score_type_constants import PEER_TYPE, SELF_TYPE, STAFF_TYPE logger = logging.getLogger(__name__) @@ -95,48 +97,34 @@ def map_anonymized_ids_to_usernames(anonymized_ids): return anonymous_id_to_username_mapping -def map_anonymized_ids_to_emails(anonymized_ids): +def map_anonymized_ids_to_user_data(anonymized_ids): """ Args: anonymized_ids - list of anonymized user ids. Returns: - dictionary, that contains mapping between anonymized user ids and - actual user emails. - """ - User = get_user_model() - - users = _use_read_replica( - User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids) - .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id")) - .values("email", "anonymous_id") - ) - anonymous_id_to_email_mapping = { - user["anonymous_id"]: user["email"] for user in users - } - return anonymous_id_to_email_mapping - - -def map_anonymized_ids_to_fullname(anonymized_ids): - """ - Args: - anonymized_ids - list of anonymized user ids. - Returns: - dictionary, that contains mapping between anonymized user ids and - actual user fullname. 
- """ + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + """ User = get_user_model() - users = _use_read_replica( User.objects.filter(anonymoususerid__anonymous_user_id__in=anonymized_ids) .select_related("profile") .annotate(anonymous_id=F("anonymoususerid__anonymous_user_id")) - .values("profile__name", "anonymous_id") - ) - - anonymous_id_to_fullname_mapping = { - user["anonymous_id"]: user["profile__name"] for user in users + ).values("username", "email", "profile__name", "anonymous_id") + + anonymous_id_to_user_info_mapping = { + user["anonymous_id"]: { + "username": user["username"], + "email": user["email"], + "fullname": user["profile__name"] + } for user in users } - return anonymous_id_to_fullname_mapping + return anonymous_id_to_user_info_mapping class CsvWriter: """ @@ -1577,3 +1565,161 @@ def get_file_uploads(self, missing_blank=False): files.append(file_upload) self.file_uploads = files return self.file_uploads + + +def score_type_to_string(score_type): + """ + Converts the given score type into its string representation. + + Args: + score_type : System representation of the score type. + + Returns: + : string representation of score_type as needed in Staff Grader Template. + """ + SCORE_TYPE_MAP = { + PEER_TYPE: "Peer", + SELF_TYPE: "Self", + STAFF_TYPE: "Staff", + } + return SCORE_TYPE_MAP.get(score_type, "Unknown") + +def parts_summary(assessment_obj): + """ + Retrieves a summary of the parts from a given assessment object. + + Args: + assessment_obj: assessment object. + + Returns: + list[dict]: A list containing assessment parts summary data dictionaries. 
+ """ + return [ + { + 'criterion_name': part.criterion.name, + 'score_earned': part.points_earned, + 'score_type': part.option.name if part.option else "None", + } + for part in assessment_obj.parts.all() + ] + +def get_scorer_data(anonymous_scorer_id, user_data_mapping): + """ + Retrieves the scorer's data (full name, username, and email) based on their anonymous ID. + + Args: + anonymous_scorer_id : Scorer's anonymous_user_id. + user_data_mapping: User data by anonymous_user_id + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + + Returns: + fullname, username, email : user data values. + """ + scorer_data = user_data_mapping.get(anonymous_scorer_id, {}) + return ( + scorer_data.get('fullname', ""), + scorer_data.get('username', ""), + scorer_data.get('email', "") + ) + +def generate_assessment_data(assessment_list, user_data_mapping): + """ + Creates the list of Assessment's data dictionaries. + + Args: + assessment_list: assessment objects queryset. + user_data_mapping: User data by anonymous_user_id + dict { + : { + 'email': (str) + 'username': (str) + 'fullname': (str) + } + } + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + assessment_data_list = [] + for assessment in assessment_list: + + scorer_name, scorer_username, scorer_email = get_scorer_data(assessment.scorer_id, user_data_mapping) + + assessment_data_list.append({ + "assessment_id": str(assessment.id), + "scorer_name": scorer_name, + "scorer_username": scorer_username, + "scorer_email": scorer_email, + "assesment_date": assessment.scored_at.strftime('%d-%m-%Y'), + "assesment_scores": parts_summary(assessment), + "problem_step": score_type_to_string(assessment.score_type), + "feedback": assessment.feedback or '' + }) + return assessment_data_list + +def generate_received_assessment_data(submission_uuid): + """ + Generates a list of received assessments data based on the submission UUID. 
+ + Args: + submission_uuid (str, optional): The UUID of the submission. Defaults to None. + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + + + submission = sub_api.get_submission_and_student(submission_uuid) + + if not submission: + return [] + + assessments = _use_read_replica( + Assessment.objects.prefetch_related('parts'). + prefetch_related('rubric'). + filter( + submission_uuid=submission['uuid'] + ) + ) + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments]) + return generate_assessment_data(assessments, user_data_mapping) + + +def generate_given_assessment_data(item_id, submission_uuid): + """ + Generates a list of given assessments data based on the submission UUID as scorer. + + Args: + item_id (str): The ID of the item. + submission_uuid (str): The UUID of the submission. + + Returns: + list[dict]: A list containing assessment data dictionaries. + """ + scorer_submission = sub_api.get_submission_and_student(submission_uuid) + + if not scorer_submission: + return [] + + scorer_id = scorer_submission['student_item']['student_id'] + + submissions = Submission.objects.filter(student_item__item_id=item_id).values('uuid') + submission_uuids = [sub['uuid'] for sub in submissions] + + if not submission_uuids or not submissions: + return [] + + assessments_made_by_student = _use_read_replica( + Assessment.objects.prefetch_related('parts') + .prefetch_related('rubric') + .filter(scorer_id=scorer_id, submission_uuid__in=submission_uuids) + ) + + user_data_mapping = map_anonymized_ids_to_user_data([assessment.scorer_id for assessment in assessments_made_by_student]) + return generate_assessment_data(assessments_made_by_student, user_data_mapping) diff --git a/openassessment/staffgrader/staff_grader_mixin.py b/openassessment/staffgrader/staff_grader_mixin.py index 639e3ddda1..b8b7709201 100644 --- a/openassessment/staffgrader/staff_grader_mixin.py +++ 
b/openassessment/staffgrader/staff_grader_mixin.py @@ -15,8 +15,13 @@ from openassessment.assessment.models.base import Assessment, AssessmentPart from openassessment.assessment.models.staff import StaffWorkflow, TeamStaffWorkflow -from openassessment.data import (OraSubmissionAnswerFactory, VersionNotFoundException, map_anonymized_ids_to_emails, - map_anonymized_ids_to_fullname, map_anonymized_ids_to_usernames) +from openassessment.data import ( + OraSubmissionAnswerFactory, + VersionNotFoundException, + map_anonymized_ids_to_user_data, + generate_received_assessment_data, + generate_given_assessment_data +) from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock from openassessment.staffgrader.serializers import ( @@ -194,6 +199,42 @@ def list_staff_workflows(self, data, suffix=''): # pylint: disable=unused-argum log.exception("Failed to serialize workflow %d: %s", staff_workflow.id, str(e), exc_info=True) return result + + + @XBlock.json_handler + @require_course_staff("STUDENT_GRADE") + def list_assessments(self, data, suffix=''): # pylint: disable=unused-argument + """ + List the assessments' grades based on the type (received or given) for a specific submission. + + Args: + data (dict): Contains the necessary information to fetch the assessments. + - 'item_id': The ID of the xblock/item. + - 'submission_uuid': The UUID of the submission. + - 'assessment_filter': A string, either "received" or any other value + to determine the type of assessments to retrieve. + + Returns: + list[dict]: A list of dictionaries, each representing an assessment's data. + + Note: + - If 'assessment_filter' is "received", the function fetches assessments received + for the given 'submission_uuid'. + - For any other value of 'assessment_filter', the function fetches assessments + given by the owner of the 'submission_uuid' for other submissions in the same item. 
+ """ + item_id = data['item_id'] + submission_uuid = data['submission_uuid'] + filter_value = data['assessment_filter'] + + if filter_value == "received": + return generate_received_assessment_data(submission_uuid) + elif filter_value == "given": + return generate_given_assessment_data(item_id, submission_uuid) + else: + raise ValueError("Invalid assessment_filter value") + + def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assignment=False): """ Fetch additional required data and models to serialize the response @@ -206,7 +247,8 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign workflow_scorer_ids.add(workflow.scorer_id) course_id = self.get_student_item_dict()['course_id'] - # Fetch user identifier mappings + context = {} + if is_team_assignment: # Look up the team IDs for submissions so we can later map to team names team_submission_uuid_to_team_id = get_team_ids_by_team_submission_uuid(submission_uuids) @@ -214,45 +256,26 @@ def _get_list_workflows_serializer_context(self, staff_workflows, is_team_assign # Look up names for teams topic_id = self.selected_teamset_id team_id_to_team_name = self.teams_service.get_team_names(course_id, topic_id) - - # Do bulk lookup for scorer anonymous ids (submitting team name is a separate lookup) - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(workflow_scorer_ids) - ) - anonymous_id_to_email = map_anonymized_ids_to_emails( - set(workflow_scorer_ids) - ) - anonymous_id_to_fullname = map_anonymized_ids_to_fullname( - set(workflow_scorer_ids) - ) - - context = { + context.update({ 'team_submission_uuid_to_team_id': team_submission_uuid_to_team_id, 'team_id_to_team_name': team_id_to_team_name, - } + }) else: # When we look up usernames we want to include all connected learner student ids submission_uuid_to_student_id = get_student_ids_by_submission_uuid( course_id, submission_uuids, ) + context['submission_uuid_to_student_id'] = 
submission_uuid_to_student_id - # Do bulk lookup for all anonymous ids (submitters and scoreres). This is used for the - # `gradedBy` and `username` fields - anonymous_id_to_username = map_anonymized_ids_to_usernames( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) - anonymous_id_to_email = map_anonymized_ids_to_emails( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) - - anonymous_id_to_fullname = map_anonymized_ids_to_fullname( - set(submission_uuid_to_student_id.values()) | workflow_scorer_ids - ) + all_anonymous_ids = set(workflow_scorer_ids) + if not is_team_assignment: + all_anonymous_ids |= set(context['submission_uuid_to_student_id'].values()) - context = { - 'submission_uuid_to_student_id': submission_uuid_to_student_id, - } + anonymous_id_to_user_data = map_anonymized_ids_to_user_data(all_anonymous_ids) + anonymous_id_to_username = {key: val["username"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_email = {key: val["email"] for key, val in anonymous_id_to_user_data.items()} + anonymous_id_to_fullname = {key: val["fullname"] for key, val in anonymous_id_to_user_data.items()} # Do a bulk fetch of the assessments linked to the workflows, including all connected # Rubric, Criteria, and Option models diff --git a/openassessment/staffgrader/tests/test_list_staff_workflows.py b/openassessment/staffgrader/tests/test_list_staff_workflows.py index 7d2dff05e9..151cb7015a 100644 --- a/openassessment/staffgrader/tests/test_list_staff_workflows.py +++ b/openassessment/staffgrader/tests/test_list_staff_workflows.py @@ -154,6 +154,28 @@ def _mock_map_anonymized_ids_to_usernames(self): ) as patched_map: yield patched_map + @contextmanager + def _mock_map_anonymized_ids_to_user_data(self): + """ + Context manager that patches map_anonymized_ids_to_user_data and returns a mapping from student IDs to a dictionary + containing username, email, and fullname. 
+ """ + # Build a mock mapping shaped like what the real function returns + mock_data = { + student_id: { + "username": self.student_id_to_username_map.get(student_id, ""), + "email": "mocked_email@example.com", # a per-student map for emails could be used here if needed + "fullname": "Mocked Full Name" # a per-student map for full names could be used here if needed + } + for student_id in self.student_id_to_username_map + } + + with patch( + 'openassessment.staffgrader.staff_grader_mixin.map_anonymized_ids_to_user_data', + return_value=mock_data + ) as patched_map: + yield patched_map + def submit_staff_assessment(self, xblock, student, grader, option, option_2=None): """ Helper method to submit a staff assessment @@ -601,20 +623,18 @@ def test_bulk_fetch_annotated_staff_workflows(self, xblock, set_up_grades, set_u def test_get_list_workflows_serializer_context(self, xblock): """ Unit test for _get_list_workflows_serializer_context """ self.set_staff_user(xblock) - # Set up the mock return_value for bulk_deep_fetch_assessments. 
- # submissions 0 and 3 are the only ones assessed + mock_staff_workflows = [ Mock(scorer_id=self.course_staff[1].student_id), Mock(assessment=None, scorer_id=None), Mock(assessment=None, scorer_id=None), Mock(scorer_id=self.course_staff[2].student_id), ] + with self._mock_get_student_ids_by_submission_uuid() as mock_get_student_ids: - # with self._mock_get_team_ids_by_team_submission_uuid() as mock_get_team_ids: - with self._mock_map_anonymized_ids_to_usernames() as mock_map_ids: + with self._mock_map_anonymized_ids_to_user_data() as mock_map_data: # key change: patch the new user-data mapping with patch.object(xblock, 'bulk_deep_fetch_assessments') as mock_bulk_fetch_assessments: - # pylint: disable=protected-access - context = xblock._get_list_workflows_serializer_context(mock_staff_workflows) + context = xblock._get_list_workflows_serializer_context(mock_staff_workflows) # pylint: disable=protected-access mock_get_student_ids.assert_called_once_with( self.course_id, @@ -627,12 +647,14 @@ def test_get_list_workflows_serializer_context(self, xblock): expected_anonymous_id_lookups.update( {self.course_staff[1].student_id, self.course_staff[2].student_id} ) - mock_map_ids.assert_called_once_with(expected_anonymous_id_lookups) + mock_map_data.assert_called_once_with(expected_anonymous_id_lookups) # key change: assert against the new mock mock_bulk_fetch_assessments.assert_called_once_with(mock_staff_workflows) expected_context = { 'submission_uuid_to_student_id': mock_get_student_ids.return_value, - 'anonymous_id_to_username': mock_map_ids.return_value, + 'anonymous_id_to_username': {k: v["username"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_email': {k: v["email"] for k, v in mock_map_data.return_value.items()}, + 'anonymous_id_to_fullname': {k: v["fullname"] for k, v in mock_map_data.return_value.items()}, 'submission_uuid_to_assessment': mock_bulk_fetch_assessments.return_value, }