From d51385d82bc975cb219a060e6291f9fcff258584 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 4 Nov 2024 21:53:07 +0100
Subject: [PATCH] Add average results to exporter

---
 evap/results/exporters.py            | 148 ++++++++++++++++++++-------
 evap/results/tests/test_exporters.py |  64 ++++++------
 evap/staff/views.py                  |   6 +-
 3 files changed, 149 insertions(+), 69 deletions(-)

diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 75f04a318..fe79cd296 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -112,24 +112,29 @@ def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Que
         return filtered_questions

     @staticmethod
-    def filter_evaluations(
-        semesters: Iterable[Semester],
-        evaluation_states: Iterable[Evaluation.State],
-        program_ids: Iterable[int],
-        course_type_ids: Iterable[int],
-        contributor: UserProfile | None,
-        include_not_enough_voters: bool,
+    def filter_evaluations(  # noqa: PLR0912
+        semesters: Iterable[Semester] | None = None,
+        evaluation_states: Iterable[Evaluation.State] | None = None,
+        program_ids: Iterable[int] | None = None,
+        course_type_ids: Iterable[int] | None = None,
+        contributor: UserProfile | None = None,
+        include_not_enough_voters: bool = False,
     ) -> tuple[list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], list[Questionnaire], bool]:
         # pylint: disable=too-many-locals
         course_results_exist = False
         evaluations_with_results = []
         used_questionnaires: set[Questionnaire] = set()
-        evaluations_filter = Q(
-            course__semester__in=semesters,
-            state__in=evaluation_states,
-            course__programs__in=program_ids,
-            course__type__in=course_type_ids,
-        )
+
+        evaluations_filter = Q()
+        if semesters:
+            evaluations_filter &= Q(course__semester__in=semesters)
+        if evaluation_states:
+            evaluations_filter &= Q(state__in=evaluation_states)
+        if program_ids:
+            evaluations_filter &= Q(course__programs__in=program_ids)
+        if course_type_ids:
+            evaluations_filter &= Q(course__type__in=course_type_ids)
+
         if contributor:
             evaluations_filter = evaluations_filter & (
                 Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor])
@@ -198,6 +203,8 @@ def write_headings_and_evaluation_info(
         else:
             self.write_cell(export_name, "headline")

+        self.write_cell("Average for this question", "evaluation")
+
         for evaluation, __ in evaluations_with_results:
             title = evaluation.full_name
             if len(semesters) > 1:
@@ -208,17 +215,19 @@
         self.next_row()

         self.write_cell(_("Programs"), "bold")
+        self.write_cell("", "program")
         for evaluation, __ in evaluations_with_results:
             self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program")
         self.next_row()

         self.write_cell(_("Course Type"), "bold")
+        self.write_cell("", "program")
         for evaluation, __ in evaluations_with_results:
             self.write_cell(evaluation.course.type.name, "border_left_right")
         self.next_row()

         # One more cell is needed for the question column
-        self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))
+        self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1))

     def write_overall_results(
         self,
@@ -228,14 +237,17 @@
         annotated_evaluations = [e for e, __ in evaluations_with_results]

         self.write_cell(_("Overall Average Grade"), "bold")
+        self.write_cell("", "border_left_right")
         averages = (distribution_to_grade(calculate_average_distribution(e)) for e in annotated_evaluations)
         self.write_row(averages, lambda avg: self.grade_to_style(avg) if avg else "border_left_right")

         self.write_cell(_("Total voters/Total participants"), "bold")
+        self.write_cell("", "total_voters")
         voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in annotated_evaluations)
         self.write_row(voter_ratios, style="total_voters")

         self.write_cell(_("Evaluation rate"), "bold")
+        self.write_cell("", "evaluation_rate")
         # round down like in progress bar
         participant_percentages = (
             f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%"
@@ -249,17 +261,19 @@

         # Borders only if there is a course grade below. Offset by one column
         self.write_empty_row_with_styles(
-            ["default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
+            ["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
         )

         self.write_cell(_("Evaluation weight"), "bold")
-        weight_percentages = (
+        self.write_cell("")
+        weight_percentages = tuple(
             f"{e.weight_percentage}%" if gt1 else None
             for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True)
         )
         self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")

         self.write_cell(_("Course Grade"), "bold")
+        self.write_cell("")
         for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True):
             if not gt1:
                 self.write_cell()
@@ -271,58 +285,118 @@
         self.next_row()

         # Same reasoning as above.
-        self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1])
+        self.write_empty_row_with_styles(
+            ["default", "default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1]
+        )
+
+    @classmethod
+    def _calculate_display_result(
+        cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]]
+    ) -> tuple[float | None, float | None]:
+        values = []
+        count_sum = 0
+        approval_count = 0
+
+        for grade_result in results[questionnaire_id]:
+            if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
+                continue
+
+            values.append(grade_result.average * grade_result.count_sum)
+            count_sum += grade_result.count_sum
+            if grade_result.question.is_yes_no_question:
+                approval_count += grade_result.approval_count
+
+        if not values:
+            return None, None
+
+        avg = sum(values) / count_sum
+        if question.is_yes_no_question:
+            percent_approval = approval_count / count_sum if count_sum > 0 else 0
+            return avg, percent_approval
+        return avg, None
+
+    @classmethod
+    def _calculate_display_result_average(
+        cls,
+        evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
+        questionnaire_id: int,
+        question: Question,
+    ) -> tuple[float | None, float | None]:
+        avg_values = []
+        count_avg = 0
+        avg_approval = []
+        count_approval = 0
+
+        for __, results in evaluations_with_results:
+            if (
+                results.get(questionnaire_id) is None
+            ):  # ignore all results without the questionnaire for average calculation
+                continue
+            avg, percent_approval = cls._calculate_display_result(questionnaire_id, question, results)
+            if avg is not None:
+                avg_values.append(avg)
+                count_avg += 1
+            if percent_approval is not None:
+                avg_approval.append(percent_approval)
+                count_approval += 1
+
+        return sum(avg_values) / count_avg if count_avg else None, (
+            sum(avg_approval) / count_approval if count_approval else None
+        )

     def write_questionnaire(
         self,
         questionnaire: Questionnaire,
         evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
         contributor: UserProfile | None,
+        all_evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]],
     ) -> None:
         if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR:
             self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold")
         else:
             self.write_cell(questionnaire.public_name, "bold")
+        self.write_cell("", "border_left_right")
+        # first cell of row is printed above
         self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results))

         for question in self.filter_text_and_heading_questions(questionnaire.questions.all()):
             self.write_cell(question.text, "italic" if question.is_heading_question else "default")
+            question_average, question_approval_count = self._calculate_display_result_average(
+                all_evaluations_with_results, questionnaire.id, question
+            )
+
+            if question_average is not None:
+                if question.is_yes_no_question:
+                    self.write_cell(f"{question_approval_count:.0%}", self.grade_to_style(question_average))
+                else:
+                    self.write_cell(question_average, self.grade_to_style(question_average))
+            else:
+                self.write_cell("", "border_left_right")
+
+            # evaluations
             for __, results in evaluations_with_results:
                 if questionnaire.id not in results or question.is_heading_question:
                     self.write_cell(style="border_left_right")
                     continue

-                values = []
-                count_sum = 0
-                approval_count = 0
-
-                for grade_result in results[questionnaire.id]:
-                    if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result):
-                        continue
+                avg, percent_approval = self._calculate_display_result(questionnaire.id, question, results)

-                    values.append(grade_result.average * grade_result.count_sum)
-                    count_sum += grade_result.count_sum
-                    if grade_result.question.is_yes_no_question:
-                        approval_count += grade_result.approval_count
-
-                if not values:
+                if avg is None:
                     self.write_cell(style="border_left_right")
                     continue

-                avg = sum(values) / count_sum
                 if question.is_yes_no_question:
-                    percent_approval = approval_count / count_sum if count_sum > 0 else 0
                     self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg))
                 else:
                     self.write_cell(avg, self.grade_to_style(avg))
             self.next_row()

-        self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))
+        self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1))

-    # pylint: disable=arguments-differ
+    # pylint: disable=arguments-differ,too-many-locals
     def export_impl(
         self,
         semesters: QuerySetOrSequence[Semester],
@@ -335,6 +409,8 @@
         # We want to throw early here, since workbook.save() will throw an IndexError otherwise.
         assert len(selection_list) > 0

+        all_evaluations_with_results, _, _ = self.filter_evaluations(evaluation_states=[Evaluation.State.PUBLISHED])
+
         for sheet_counter, (program_ids, course_type_ids) in enumerate(selection_list, 1):
             self.cur_sheet = self.workbook.add_sheet("Sheet " + str(sheet_counter))
             self.cur_row = 0
@@ -358,7 +434,9 @@
             )

             for questionnaire in used_questionnaires:
-                self.write_questionnaire(questionnaire, evaluations_with_results, contributor)
+                self.write_questionnaire(
+                    questionnaire, evaluations_with_results, contributor, all_evaluations_with_results
+                )

             self.write_overall_results(evaluations_with_results, course_results_exist)

diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py
index 7d60d3366..c77fc9ca9 100644
--- a/evap/results/tests/test_exporters.py
+++ b/evap/results/tests/test_exporters.py
@@ -175,12 +175,12 @@ def test_view_excel_file_sorted(self):

         # Load responses as Excel files and check for correct sorting
         workbook = xlrd.open_workbook(file_contents=content_de.read())
-        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation1\n")
-        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation2\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation1\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation2\n")

         workbook = xlrd.open_workbook(file_contents=content_en.read())
-        self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation2\n")
-        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation2\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation1\n")

     def test_course_type_ordering(self):
         program = baker.make(Program)
@@ -221,8 +221,8 @@
         binary_content.seek(0)
         workbook = xlrd.open_workbook(file_contents=binary_content.read())

-        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name + "\n")
-        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_2.full_name + "\n")

         course_type_2.order = 0
         course_type_2.save()
@@ -234,8 +234,8 @@
         binary_content.seek(0)
         workbook = xlrd.open_workbook(file_contents=binary_content.read())

-        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n")
-        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n")
+        self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_1.full_name + "\n")

     def test_multiple_sheets(self):
         binary_content = BytesIO()
@@ -287,17 +287,17 @@ def test_include_unpublished(self):
         sheet = self.get_export_sheet(
             include_unpublished=False, semester=semester, program=program, course_types=course_types
         )
-        self.assertEqual(len(sheet.row_values(0)), 2)
-        self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name)
+        self.assertEqual(len(sheet.row_values(0)), 3)
+        self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name)

         # Now, make sure that it appears when wanted
         sheet = self.get_export_sheet(
             include_unpublished=True, semester=semester, program=program, course_types=course_types
         )
-        self.assertEqual(len(sheet.row_values(0)), 3)
+        self.assertEqual(len(sheet.row_values(0)), 4)
         # These two should be ordered according to evaluation.course.type.order
-        self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name)
-        self.assertEqual(sheet.row_values(0)[2][:-1], unpublished_evaluation.full_name)
+        self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name)
+        self.assertEqual(sheet.row_values(0)[3][:-1], unpublished_evaluation.full_name)

     def test_include_not_enough_voters(self):
         semester = baker.make(Semester)
@@ -326,15 +326,15 @@

         # First, make sure that the one with only a single voter does not appear
         sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=False)
-        self.assertEqual(len(sheet.row_values(0)), 2)
-        self.assertEqual(sheet.row_values(0)[1][:-1], enough_voters_evaluation.full_name)
+        self.assertEqual(len(sheet.row_values(0)), 3)
+        self.assertEqual(sheet.row_values(0)[2][:-1], enough_voters_evaluation.full_name)

         # Now, check with the option enabled
         sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=True)
-        self.assertEqual(len(sheet.row_values(0)), 3)
+        self.assertEqual(len(sheet.row_values(0)), 4)
         self.assertEqual(
             {enough_voters_evaluation.full_name, not_enough_voters_evaluation.full_name},
-            {sheet.row_values(0)[1][:-1], sheet.row_values(0)[2][:-1]},
+            {sheet.row_values(0)[2][:-1], sheet.row_values(0)[3][:-1]},
         )

     def test_no_program_or_course_type(self):
@@ -350,7 +350,7 @@ def test_exclude_single_result(self):
         cache_results(evaluation)
         sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
         self.assertEqual(
-            len(sheet.row_values(0)), 1, "There should be no column for the evaluation, only the row description"
+            len(sheet.row_values(0)), 2, "There should be no column for the evaluation, only the row description"
         )

     def test_exclude_used_but_unanswered_questionnaires(self):
@@ -386,7 +386,7 @@ def test_program_course_type_name(self):
         cache_results(evaluation)

         sheet = self.get_export_sheet(evaluation.course.semester, program, [course_type.id])
-        self.assertEqual(sheet.col_values(1)[1:3], [program.name, course_type.name])
+        self.assertEqual(sheet.col_values(2)[1:3], [program.name, course_type.name])

     def test_multiple_evaluations(self):
         semester = baker.make(Semester)
@@ -402,7 +402,7 @@

         sheet = self.get_export_sheet(semester, program, [evaluation1.course.type.id, evaluation2.course.type.id])

-        self.assertEqual(set(sheet.row_values(0)[1:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"})
+        self.assertEqual(set(sheet.row_values(0)[2:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"})

     def test_correct_grades_and_bottom_numbers(self):
         program = baker.make(Program)
@@ -426,11 +426,11 @@

         sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])

-        self.assertEqual(sheet.row_values(5)[1], 2.0)  # question 1 average
-        self.assertEqual(sheet.row_values(8)[1], 3.0)  # question 2 average
-        self.assertEqual(sheet.row_values(10)[1], 2.5)  # Average grade
-        self.assertEqual(sheet.row_values(11)[1], "5/10")  # Voters / Participants
-        self.assertEqual(sheet.row_values(12)[1], "50%")  # Voter percentage
+        self.assertEqual(sheet.row_values(5)[2], 2.0)  # question 1 average
+        self.assertEqual(sheet.row_values(8)[2], 3.0)  # question 2 average
+        self.assertEqual(sheet.row_values(10)[2], 2.5)  # Average grade
+        self.assertEqual(sheet.row_values(11)[2], "5/10")  # Voters / Participants
+        self.assertEqual(sheet.row_values(12)[2], "50%")  # Voter percentage

     def test_course_grade(self):
         program = baker.make(Program)
@@ -458,9 +458,9 @@
             cache_results(evaluation)

         sheet = self.get_export_sheet(course.semester, program, [course.type.id])
-        self.assertEqual(sheet.row_values(12)[1], expected_average)
         self.assertEqual(sheet.row_values(12)[2], expected_average)
         self.assertEqual(sheet.row_values(12)[3], expected_average)
+        self.assertEqual(sheet.row_values(12)[4], expected_average)

     def test_yes_no_question_result(self):
         program = baker.make(Program)
@@ -481,7 +481,7 @@

         sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id])
         self.assertEqual(sheet.row_values(5)[0], question.text)
-        self.assertEqual(sheet.row_values(5)[1], "67%")
+        self.assertEqual(sheet.row_values(5)[2], "67%")

     def test_contributor_result_export(self):
         program = baker.make(Program)
@@ -528,24 +528,24 @@
         workbook = xlrd.open_workbook(file_contents=binary_content)

         self.assertEqual(
-            workbook.sheets()[0].row_values(0)[1],
+            workbook.sheets()[0].row_values(0)[2],
             f"{evaluation_1.full_name}\n{evaluation_1.course.semester.name}\n{contributor.full_name}",
         )
         self.assertEqual(
-            workbook.sheets()[0].row_values(0)[2],
+            workbook.sheets()[0].row_values(0)[3],
             f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}",
         )
         self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.public_name)
         self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text)
-        self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0)
+        self.assertEqual(workbook.sheets()[0].row_values(5)[3], 4.0)
         self.assertEqual(
             workbook.sheets()[0].row_values(7)[0],
             f"{contributor_questionnaire.public_name} ({contributor.full_name})",
         )
         self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text)
-        self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0)
+        self.assertEqual(workbook.sheets()[0].row_values(8)[3], 3.0)
         self.assertEqual(workbook.sheets()[0].row_values(10)[0], "Overall Average Grade")
-        self.assertEqual(workbook.sheets()[0].row_values(10)[2], 3.25)
+        self.assertEqual(workbook.sheets()[0].row_values(10)[3], 3.25)

     def test_text_answer_export(self):
         evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, can_publish_text_results=True)
diff --git a/evap/staff/views.py b/evap/staff/views.py
index 01cf657ee..49470f0a9 100644
--- a/evap/staff/views.py
+++ b/evap/staff/views.py
@@ -5,7 +5,7 @@
 from dataclasses import dataclass
 from datetime import date, datetime
 from enum import Enum
-from typing import Any, Final, Literal, cast
+from typing import Any, Literal, cast

 import openpyxl
 from django.conf import settings
@@ -1523,7 +1523,9 @@ def evaluation_person_management(request, evaluation_id: int):
             raise SuspiciousOperation("Invalid POST operation")

         import_action = ImportAction.from_operation(operation)
-        import_type: Final = ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
+        import_type: Literal[ImportType.PARTICIPANT, ImportType.CONTRIBUTOR] = (
+            ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR
+        )
         excel_form = participant_excel_form if "participants" in operation else contributor_excel_form
         copy_form = participant_copy_form if "participants" in operation else contributor_copy_form