From c8068ce8f2a04157586785dcb058aadd86e8d4ed Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 16 Oct 2023 20:29:11 +0200
Subject: [PATCH] type results exporter

---
 evap/results/exporters.py | 87 +++++++++++++++++++++++++++-----------
 evap/results/tools.py     |  4 +-
 2 files changed, 60 insertions(+), 31 deletions(-)

diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 2cce248693..7325c3f56a 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -1,14 +1,18 @@
 import warnings
 from collections import OrderedDict, defaultdict
+from collections.abc import Generator, Iterable
 from itertools import chain, repeat
+from typing import Any

 import xlwt
 from django.db.models import Q
+from django.db.models.query import QuerySet
 from django.utils.translation import gettext as _

-from evap.evaluation.models import CourseType, Degree, Evaluation, Questionnaire
+from evap.evaluation.models import CourseType, Degree, Evaluation, Question, Questionnaire, Semester, UserProfile
 from evap.evaluation.tools import ExcelExporter
 from evap.results.tools import (
+    RatingResult,
     calculate_average_course_distribution,
     calculate_average_distribution,
     distribution_to_grade,
@@ -37,24 +41,24 @@ class ResultsExporter(ExcelExporter):
         **ExcelExporter.styles,
     }

-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__()
         for index, color in self.COLOR_MAPPINGS.items():
             self.workbook.set_colour_RGB(index, *color)

     @classmethod
-    def grade_to_style(cls, grade):
+    def grade_to_style(cls, grade: float) -> str:
         return "grade_" + str(cls.normalize_number(grade))

     @classmethod
-    def normalize_number(cls, number):
-        """floors 'number' to a multiply of cls.STEP"""
+    def normalize_number(cls, number: float) -> float:
+        """floors 'number' to a multiple of cls.STEP"""
         rounded_number = round(number, 1)  # see #302
         return round(int(rounded_number / cls.STEP + 0.0001) * cls.STEP, 1)

     @classmethod
-    def init_grade_styles(cls):
+    def init_grade_styles(cls) -> None:
         """
         Adds the grade styles to cls.styles and as a xlwt identifier.
         This also notes all registered colors in cls.COLOR_MAPPINGS for the instances.
@@ -86,7 +90,7 @@ def init_grade_styles(cls):
         cls.styles[style_name] = xlwt.easyxf(grade_base_style.format(color_name), num_format_str="0.0")

     @staticmethod
-    def filter_text_and_heading_questions(questions):
+    def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Question]:
         questions = [question for question in questions if not question.is_text_question]

         # remove heading questions if they have no "content" below them
@@ -101,11 +105,18 @@ def filter_text_and_heading_questions(questions):
         return filtered_questions

     @staticmethod
-    def filter_evaluations(semesters, evaluation_states, degrees, course_types, contributor, include_not_enough_voters):
+    def filter_evaluations(
+        semesters: Iterable[Semester],
+        evaluation_states: Iterable[int],
+        degrees: Iterable[int],
+        course_types: Iterable[int],
+        contributor: UserProfile | None,
+        include_not_enough_voters: bool,
+    ) -> tuple[list[tuple[Evaluation, Any]], Iterable[Questionnaire], bool]:
         # pylint: disable=too-many-locals
         course_results_exist = False
         evaluations_with_results = []
-        used_questionnaires = set()
+        used_questionnaires: set[Questionnaire] = set()
         evaluations_filter = Q(
             course__semester__in=semesters,
             state__in=evaluation_states,
@@ -122,15 +133,16 @@ def filter_evaluations(semesters, evaluation_states, degrees, course_types, cont
                 continue
             if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                 continue
-            results = OrderedDict()
+            results: OrderedDict[int, list[RatingResult]] = OrderedDict()
             for contribution_result in get_results(evaluation).contribution_results:
                 for questionnaire_result in contribution_result.questionnaire_results:
                     # RatingQuestion.counts is a tuple of integers or None, if this tuple is all zero, we want to exclude it
+                    question_results: list[RatingResult] = questionnaire_result.question_results  # type: ignore[assignment]
                     if all(
                         not question_result.question.is_rating_question
                         or question_result.counts is None
                         or sum(question_result.counts) == 0
-                        for question_result in questionnaire_result.question_results
+                        for question_result in question_results
                     ):
                         continue
                     if (
@@ -138,16 +150,14 @@ def filter_evaluations(semesters, evaluation_states, degrees, course_types, cont
                         or contribution_result.contributor is None
                         or contribution_result.contributor == contributor
                     ):
-                        results.setdefault(questionnaire_result.questionnaire.id, []).extend(
-                            questionnaire_result.question_results
-                        )
+                        results.setdefault(questionnaire_result.questionnaire.id, []).extend(question_results)
                         used_questionnaires.add(questionnaire_result.questionnaire)
-            evaluation.course_evaluations_count = evaluation.course.evaluations.count()
-            if evaluation.course_evaluations_count > 1:
+            evaluation.course_evaluations_count = evaluation.course.evaluations.count()  # type: ignore[attr-defined]
+            if evaluation.course_evaluations_count > 1:  # type: ignore[attr-defined]
                 course_results_exist = True
                 weight_sum = sum(evaluation.weight for evaluation in evaluation.course.evaluations.all())
-                evaluation.weight_percentage = int((evaluation.weight / weight_sum) * 100)
-                evaluation.course.avg_grade = distribution_to_grade(
+                evaluation.weight_percentage = int((evaluation.weight / weight_sum) * 100)  # type: ignore
+                evaluation.course.avg_grade = distribution_to_grade(  # type: ignore[attr-defined]
                     calculate_average_course_distribution(evaluation.course)
                 )
             evaluations_with_results.append((evaluation, results))
@@ -155,13 +165,17 @@
         evaluations_with_results.sort(
             key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name)
         )
-        used_questionnaires = sorted(used_questionnaires)
-        return evaluations_with_results, used_questionnaires, course_results_exist
+        return evaluations_with_results, sorted(used_questionnaires), course_results_exist

     def write_headings_and_evaluation_info(
-        self, evaluations_with_results, semesters, contributor, degrees, course_types
-    ):
+        self,
+        evaluations_with_results: list[tuple[Evaluation, Any]],
+        semesters: QuerySet[Semester],
+        contributor: UserProfile | None,
+        degrees: Iterable[int],
+        course_types: Iterable[int],
+    ) -> None:
         export_name = "Evaluation"
         if contributor:
             export_name += f"\n{contributor.full_name}"
@@ -195,7 +209,9 @@ def write_headings_and_evaluation_info(
         # One more cell is needed for the question column
         self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))

-    def write_overall_results(self, evaluations_with_results, course_results_exist):
+    def write_overall_results(
+        self, evaluations_with_results: list[tuple[Evaluation, Any]], course_results_exist: bool
+    ) -> None:
         evaluations = [e for e, __ in evaluations_with_results]

         self.write_cell(_("Overall Average Grade"), "bold")
@@ -215,7 +231,7 @@ def write_overall_results(self, evaluations_with_results, course_results_exist):

         if course_results_exist:
             # Only query the number of evaluations once and keep track of it here.
-            count_gt_1 = [e.course_evaluations_count > 1 for e in evaluations]
+            count_gt_1 = [e.course_evaluations_count > 1 for e in evaluations]  # type: ignore[attr-defined]

             # Borders only if there is a course grade below. Offset by one column
             self.write_empty_row_with_styles(
@@ -223,7 +239,10 @@ def write_overall_results(self, evaluations_with_results, course_results_exist):
             )

             self.write_cell(_("Evaluation weight"), "bold")
-            weight_percentages = (f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(evaluations, count_gt_1))
+            weight_percentages: Generator[str | None, None, None] = (
+                f"{e.weight_percentage}%" if gt1 else None
+                for e, gt1 in zip(evaluations, count_gt_1)  # type: ignore[attr-defined]
+            )
             self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")

             self.write_cell(_("Course Grade"), "bold")
@@ -232,7 +251,7 @@ def write_overall_results(self, evaluations_with_results, course_results_exist):
                     self.write_cell()
                     continue

-                avg = evaluation.course.avg_grade
+                avg = evaluation.course.avg_grade  # type: ignore[attr-defined]
                 style = self.grade_to_style(avg) if avg is not None else "border_left_right"
                 self.write_cell(avg, style)
             self.next_row()
@@ -240,7 +259,12 @@
             # Same reasoning as above.
             self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1])

-    def write_questionnaire(self, questionnaire, evaluations_with_results, contributor):
+    def write_questionnaire(
+        self,
+        questionnaire: Questionnaire,
+        evaluations_with_results: list[tuple[Evaluation, Any]],
+        contributor: UserProfile | None,
+    ) -> None:
         if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR:
             self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold")
         else:
@@ -285,8 +309,13 @@ def write_questionnaire(self, questionnaire, evaluations_with_results, contribut

     # pylint: disable=arguments-differ
     def export_impl(
-        self, semesters, selection_list, include_not_enough_voters=False, include_unpublished=False, contributor=None
-    ):
+        self,
+        semesters: QuerySet[Semester],
+        selection_list,
+        include_not_enough_voters: bool = False,
+        include_unpublished: bool = False,
+        contributor: UserProfile | None = None,
+    ) -> None:
         # We want to throw early here, since workbook.save() will throw an IndexError otherwise.
         assert len(selection_list) > 0

diff --git a/evap/results/tools.py b/evap/results/tools.py
index fceefa3bf4..0160cce442 100644
--- a/evap/results/tools.py
+++ b/evap/results/tools.py
@@ -174,7 +174,7 @@ def cache_results(evaluation, *, refetch_related_objects=True):
     caches["results"].set(cache_key, _get_results_impl(evaluation, refetch_related_objects=refetch_related_objects))


-def get_results(evaluation):
+def get_results(evaluation: Evaluation) -> EvaluationResult:
     assert evaluation.state in STATES_WITH_RESULTS_CACHING | {Evaluation.State.IN_EVALUATION}

     if evaluation.state == Evaluation.State.IN_EVALUATION:
@@ -195,7 +195,7 @@ def get_results(evaluation):
     ]


-def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = True):
+def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = True) -> EvaluationResult:
     if refetch_related_objects:
         discard_cached_related_objects(evaluation)

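--

Two short sketches for context (not part of the patch itself):

First, the flooring logic behind the newly typed normalize_number, as a
standalone reproduction. STEP is defined on ResultsExporter outside the hunks
shown above, so the 0.2 used here is an assumption for illustration only:

    STEP = 0.2

    def normalize_number(number: float) -> float:
        """Floors 'number' to a multiple of STEP."""
        rounded_number = round(number, 1)  # guard against float noise, see #302
        # the + 0.0001 epsilon keeps quotients that land just below an
        # integer from flooring one step too low
        return round(int(rounded_number / STEP + 0.0001) * STEP, 1)

    assert normalize_number(1.9) == 1.8  # floored to the next lower multiple of 0.2
    assert normalize_number(2.0) == 2.0  # exact multiples pass through unchanged

grade_to_style(1.9) would accordingly pick the style "grade_1.8".

Second, the `# type: ignore[attr-defined]` comments are needed because the
exporter attaches course_evaluations_count, weight_percentage and avg_grade to
model instances at runtime. A hedged alternative would be to declare such
fields up front; EvaluationRow below is a hypothetical stand-in, not an evap
class and not what this patch does:

    from dataclasses import dataclass

    @dataclass
    class EvaluationRow:
        """Per-row exporter state, declared so mypy can check the attributes."""

        weight: int
        course_evaluations_count: int = 1
        weight_percentage: int | None = None
        avg_grade: float | None = None

The trade-off is an extra indirection at every call site, which is likely why
the patch keeps the runtime attributes and silences mypy instead.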