Commit

type results exporter
fidoriel committed Oct 16, 2023
1 parent baaa7dd commit c8068ce
Showing 2 changed files with 63 additions and 30 deletions.
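The change is pure typing: functions in evap/results/exporters.py and evap/results/tools.py gain PEP 484 annotations, and attributes that the exporter attaches to Django model instances at runtime are silenced with targeted type-ignore comments. A minimal sketch of that runtime-attribute pattern, with a toy class standing in for the project's models (all names below are illustrative, not from the commit):

class Course:
    """Toy stand-in for a Django model; avg_grade is not a declared attribute."""

def attach_average(course: Course, grade: float) -> None:
    # Python accepts ad-hoc instance attributes at runtime, but mypy rejects
    # assignments to names it cannot find on the class, hence the targeted
    # ignore the commit uses throughout:
    course.avg_grade = grade  # type: ignore[attr-defined]

Declaring such attributes on the model class would avoid the ignores but widen the model's public surface; the commit keeps the annotations local to the exporter instead.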
89 changes: 61 additions & 28 deletions evap/results/exporters.py
@@ -1,14 +1,19 @@
import warnings
from collections import OrderedDict, defaultdict
+from collections.abc import Generator, Iterable
from itertools import chain, repeat
+from typing import Any

import xlwt
from django.db.models import Q
+from django.db.models.query import QuerySet
from django.utils.translation import gettext as _

-from evap.evaluation.models import CourseType, Degree, Evaluation, Questionnaire
+from evap.evaluation.models import CourseType, Degree, Evaluation, Question, Questionnaire, Semester, UserProfile
from evap.evaluation.tools import ExcelExporter
from evap.results.tools import (
+EvaluationResult,
+RatingResult,
calculate_average_course_distribution,
calculate_average_distribution,
distribution_to_grade,
@@ -37,24 +42,24 @@ class ResultsExporter(ExcelExporter):
**ExcelExporter.styles,
}

-def __init__(self):
+def __init__(self) -> None:
super().__init__()

for index, color in self.COLOR_MAPPINGS.items():
self.workbook.set_colour_RGB(index, *color)

@classmethod
-def grade_to_style(cls, grade):
+def grade_to_style(cls, grade: float) -> str:
return "grade_" + str(cls.normalize_number(grade))

@classmethod
-def normalize_number(cls, number):
+def normalize_number(cls, number: float) -> float:
"""floors 'number' to a multiple of cls.STEP"""
rounded_number = round(number, 1) # see #302
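# Worked example, assuming cls.STEP == 0.2: 1.94 is rounded to 1.9, then
# floored to 1.8, the nearest lower multiple of STEP. The + 0.0001 below
# compensates for float artifacts such as 1.8 / 0.2 == 8.999999999999998,
# which int() would otherwise floor one step too far.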
return round(int(rounded_number / cls.STEP + 0.0001) * cls.STEP, 1)

@classmethod
-def init_grade_styles(cls):
+def init_grade_styles(cls) -> None:
"""
Adds the grade styles to cls.styles and as a xlwt identifier.
This also notes all registered colors in cls.COLOR_MAPPINGS for the instances.
@@ -86,7 +91,7 @@ def init_grade_styles(cls):
cls.styles[style_name] = xlwt.easyxf(grade_base_style.format(color_name), num_format_str="0.0")

@staticmethod
-def filter_text_and_heading_questions(questions):
+def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Question]:
questions = [question for question in questions if not question.is_text_question]

# remove heading questions if they have no "content" below them
@@ -101,11 +106,18 @@ def filter_text_and_heading_questions(questions):
return filtered_questions

@staticmethod
-def filter_evaluations(semesters, evaluation_states, degrees, course_types, contributor, include_not_enough_voters):
+def filter_evaluations(
+semesters: Iterable[Semester],
+evaluation_states: Iterable[int],
+degrees: Iterable[int],
+course_types: CourseType,
+contributor: UserProfile | None,
+include_not_enough_voters: bool,
+) -> tuple[list[tuple[Evaluation, Any]], Iterable[Questionnaire], bool]:
# pylint: disable=too-many-locals
course_results_exist = False
evaluations_with_results = []
-used_questionnaires = set()
+used_questionnaires: set[Questionnaire] = set()
evaluations_filter = Q(
course__semester__in=semesters,
state__in=evaluation_states,
@@ -122,46 +134,49 @@ def filter_evaluations(semesters, evaluation_states, degrees, course_types, cont
continue
if not evaluation.can_publish_rating_results and not include_not_enough_voters:
continue
-results = OrderedDict()
+results: OrderedDict[EvaluationResult, list[RatingResult]] = OrderedDict()
for contribution_result in get_results(evaluation).contribution_results:
for questionnaire_result in contribution_result.questionnaire_results:
# RatingQuestion.counts is a tuple of integers or None; if this tuple is all zeros, we want to exclude it
+question_results: list[RatingResult] = questionnaire_result.question_results # type: ignore[assignment]
if all(
not question_result.question.is_rating_question
or question_result.counts is None
or sum(question_result.counts) == 0
-for question_result in questionnaire_result.question_results
+for question_result in question_results
):
continue
if (
not contributor
or contribution_result.contributor is None
or contribution_result.contributor == contributor
):
-results.setdefault(questionnaire_result.questionnaire.id, []).extend(
-questionnaire_result.question_results
-)
+results.setdefault(questionnaire_result.questionnaire.id, []).extend(question_results)
used_questionnaires.add(questionnaire_result.questionnaire)
-evaluation.course_evaluations_count = evaluation.course.evaluations.count()
-if evaluation.course_evaluations_count > 1:
+evaluation.course_evaluations_count = evaluation.course.evaluations.count() # type: ignore[attr-defined]
+if evaluation.course_evaluations_count > 1: # type: ignore[attr-defined]
course_results_exist = True
weight_sum = sum(evaluation.weight for evaluation in evaluation.course.evaluations.all())
-evaluation.weight_percentage = int((evaluation.weight / weight_sum) * 100)
-evaluation.course.avg_grade = distribution_to_grade(
+evaluation.weight_percentage = int((evaluation.weight / weight_sum) * 100) # type: ignore
+evaluation.course.avg_grade = distribution_to_grade( # type: ignore[attr-defined]
calculate_average_course_distribution(evaluation.course)
)
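# course_evaluations_count, weight_percentage and avg_grade are not model
# fields; they are attached to the instances ad hoc for this export, which
# is why the assignments above need attr-defined ignores.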
evaluations_with_results.append((evaluation, results))

evaluations_with_results.sort(
key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name)
)
-used_questionnaires = sorted(used_questionnaires)

-return evaluations_with_results, used_questionnaires, course_results_exist
+return evaluations_with_results, sorted(used_questionnaires), course_results_exist
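# sorting only at the return site keeps used_questionnaires a plain
# set[Questionnaire] for its whole lifetime; the removed reassignment to a
# sorted list changed the variable's type mid-function, which the new
# annotation could not express.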

def write_headings_and_evaluation_info(
-self, evaluations_with_results, semesters, contributor, degrees, course_types
-):
+self,
+evaluations_with_results: list[tuple[Evaluation, Any]],
+semesters: QuerySet[Semester],
+contributor: None | UserProfile,
+degrees: Iterable[int],
+course_types: Iterable[int],
+) -> None:
export_name = "Evaluation"
if contributor:
export_name += f"\n{contributor.full_name}"
@@ -195,7 +210,9 @@ def write_headings_and_evaluation_info(
# One more cell is needed for the question column
self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results))

-def write_overall_results(self, evaluations_with_results, course_results_exist):
+def write_overall_results(
+self, evaluations_with_results: list[tuple[Evaluation, None]], course_results_exist: bool
+) -> None:
evaluations = [e for e, __ in evaluations_with_results]

self.write_cell(_("Overall Average Grade"), "bold")
@@ -215,15 +232,18 @@ def write_overall_results(self, evaluations_with_results, course_results_exist):

if course_results_exist:
# Only query the number of evaluations once and keep track of it here.
-count_gt_1 = [e.course_evaluations_count > 1 for e in evaluations]
+count_gt_1 = [e.course_evaluations_count > 1 for e in evaluations] # type: ignore[attr-defined]

# Borders only if there is a course grade below. Offset by one column
self.write_empty_row_with_styles(
["default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1]
)

self.write_cell(_("Evaluation weight"), "bold")
-weight_percentages = (f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(evaluations, count_gt_1))
+weight_percentages: Generator[str | None, None, None] = (
+f"{e.weight_percentage}%" if gt1 else None
+for e, gt1 in zip(evaluations, count_gt_1) # type: ignore[attr-defined]
+)
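# Generator[str | None, None, None] reads: yields str or None, accepts
# nothing via send(), and returns nothing.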
self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")

self.write_cell(_("Course Grade"), "bold")
@@ -232,15 +252,20 @@ def write_overall_results(self, evaluations_with_results, course_results_exist):
self.write_cell()
continue

-avg = evaluation.course.avg_grade
+avg = evaluation.course.avg_grade # type: ignore[attr-defined]
style = self.grade_to_style(avg) if avg is not None else "border_left_right"
self.write_cell(avg, style)
self.next_row()

# Same reasoning as above.
self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1])

def write_questionnaire(self, questionnaire, evaluations_with_results, contributor):
def write_questionnaire(
self,
questionnaire: Questionnaire,
evaluations_with_results: list[tuple[Evaluation, Any]],
contributor: None | UserProfile,
) -> None:
if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR:
self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold")
else:
@@ -285,8 +310,13 @@ def write_questionnaire(questionnaire, evaluations_with_results, contribut

# pylint: disable=arguments-differ
def export_impl(
-self, semesters, selection_list, include_not_enough_voters=False, include_unpublished=False, contributor=None
-):
+self,
+semesters: QuerySet[Semester],
+selection_list,
+include_not_enough_voters: bool = False,
+include_unpublished: bool = False,
+contributor: None | UserProfile = None,
+) -> None:
# We want to throw early here, since workbook.save() will throw an IndexError otherwise.
assert len(selection_list) > 0

@@ -316,6 +346,9 @@ def export_impl(
self.write_questionnaire(questionnaire, evaluations_with_results, contributor)

self.write_overall_results(evaluations_with_results, course_results_exist)
+# self.write_headings_and_evaluation_info(
+# _("Average for this question"), "-", "-", "-", "-"
+# )


# See method definition.
4 changes: 2 additions & 2 deletions evap/results/tools.py
@@ -174,7 +174,7 @@ def cache_results(evaluation, *, refetch_related_objects=True):
caches["results"].set(cache_key, _get_results_impl(evaluation, refetch_related_objects=refetch_related_objects))


-def get_results(evaluation):
+def get_results(evaluation: Evaluation) -> EvaluationResult:
assert evaluation.state in STATES_WITH_RESULTS_CACHING | {Evaluation.State.IN_EVALUATION}

if evaluation.state == Evaluation.State.IN_EVALUATION:
@@ -195,7 +195,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = True):
]


-def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = True):
+def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = True) -> EvaluationResult:
if refetch_related_objects:
discard_cached_related_objects(evaluation)


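With the new annotations, the shape of get_results' return value is visible to the type checker at call sites. A hedged sketch of consumer code (the helper function is illustrative, not part of the commit; the attribute names follow the diff above):

from evap.evaluation.models import Evaluation
from evap.results.tools import EvaluationResult, get_results

def questionnaire_names(evaluation: Evaluation) -> set[str]:
    # mypy now infers EvaluationResult here without reading the implementation
    result: EvaluationResult = get_results(evaluation)
    names: set[str] = set()
    for contribution_result in result.contribution_results:
        for questionnaire_result in contribution_result.questionnaire_results:
            names.add(questionnaire_result.questionnaire.public_name)
    return names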