drop python 3.9 support (#91)
Category: CI
JIRA issue: https://jira.ihme.washington.edu/browse/MIC-5524

Changes and notes
Remove Python 3.9 from build matrices.
Modernize type hinting.
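
For reference, the type-hinting modernization swaps typing.Optional/typing.Union for PEP 604 union syntax (X | Y) and builtin generics (list, tuple). The | syntax is only valid in annotations at runtime on Python 3.10+, which is why it lands together with the 3.9 drop. A before/after mirroring the get_data signature in the diff below:

    # Before (Python 3.9): requires imports from typing
    from typing import List, Optional, Union

    def get_data(years: Optional[Union[int, str, List[int]]] = None) -> None: ...

    # After (Python 3.10+): no typing imports needed
    def get_data(years: int | str | list[int] | None = None) -> None: ...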

Testing
Built repo using cookiecutter.
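
For anyone reproducing the test, rendering the template exercises the changed files. A minimal sketch using cookiecutter's Python API, where the template path is hypothetical:

    # a sketch, assuming a local checkout of this template repository
    from cookiecutter.main import cookiecutter

    cookiecutter("path/to/this-template-checkout", no_input=True)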
hussain-jafari authored Nov 12, 2024
1 parent 594721d commit b7ae7fb
Showing 7 changed files with 28 additions and 35 deletions.
2 changes: 1 addition & 1 deletion {{cookiecutter.package_name}}/README.rst
@@ -27,7 +27,7 @@ all necessary requirements as follows::
({{ cookiecutter.package_name }}) :~$ pip install -e .
...pip will install vivarium and other requirements...

-Supported Python versions: 3.9, 3.10, 3.11
+Supported Python versions: 3.10, 3.11

Note the ``-e`` flag that follows pip install. This will install the python
package in-place, which is important for making the model specifications later.
2 changes: 1 addition & 1 deletion {{cookiecutter.package_name}}/python_versions.json
@@ -1 +1 @@
["3.9", "3.10", "3.11"]
["3.10", "3.11"]
@@ -10,7 +10,6 @@
"""
from pathlib import Path
-from typing import Optional

import pandas as pd
from loguru import logger
@@ -50,7 +49,7 @@ def open_artifact(output_path: Path, location: str) -> Artifact:


def load_and_write_data(
-artifact: Artifact, key: str, location: str, years: Optional[str], replace: bool
+artifact: Artifact, key: str, location: str, years: str | None, replace: bool
):
"""Loads data and writes it to the artifact if not already present.
@@ -12,8 +12,6 @@
No logging is done here. Logging is done in vivarium inputs itself and forwarded.
"""
-from typing import List, Optional, Union

import numpy as np
import pandas as pd
from gbd_mapping import causes, covariates, risk_factors
@@ -29,7 +27,7 @@


def get_data(
-lookup_key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+lookup_key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
"""Retrieves data from an appropriate source.
@@ -66,7 +64,7 @@ def get_data(


def load_population_location(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> str:
if key != data_keys.POPULATION.LOCATION:
raise ValueError(f"Unrecognized key {key}")
@@ -75,38 +73,38 @@ def load_population_location(


def load_population_structure(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
return interface.get_population_structure(location, years)


def load_age_bins(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
return interface.get_age_bins()


def load_demographic_dimensions(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
return interface.get_demographic_dimensions(location, years)


def load_theoretical_minimum_risk_life_expectancy(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
return interface.get_theoretical_minimum_risk_life_expectancy()


def load_standard_data(
-key: str, location: str, years: Optional[Union[int, str, List[int]]] = None
+key: str, location: str, years: int | str | list[int] | None = None
) -> pd.DataFrame:
key = EntityKey(key)
entity = get_entity(key)
return interface.get_measure(entity, key.measure, location, years).droplevel("location")


-def load_metadata(key: str, location: str, years: Optional[Union[int, str, List[int]]] = None):
+def load_metadata(key: str, location: str, years: int | str | list[int] | None = None):
key = EntityKey(key)
entity = get_entity(key)
entity_metadata = entity[key.measure]
@@ -115,7 +113,7 @@ def load_metadata(key: str, location: str, years: Optional[Union[int, str, List[
return entity_metadata


-def load_categorical_paf(key: str, location: str, years: Optional[Union[int, str, List[int]]] = None) -> pd.DataFrame:
+def load_categorical_paf(key: str, location: str, years: int | str | list[int] | None = None) -> pd.DataFrame:
try:
risk = {
# todo add keys as needed
@@ -163,7 +161,7 @@ def _load_em_from_meid(location, meid, measure):
# TODO - add project-specific data functions here


-def get_entity(key: Union[str, EntityKey]):
+def get_entity(key: str | EntityKey):
# Map of entity types to their gbd mappings.
type_map = {
"cause": causes,
@@ -1,5 +1,3 @@
-from typing import Optional, Tuple

import click
from loguru import logger
from vivarium.framework.utilities import handle_exceptions
@@ -52,10 +50,10 @@
)
def make_artifacts(
location: str,
-years: Optional[str],
+years: str | None,
output_dir: str,
append: bool,
-replace_keys: Tuple[str, ...],
+replace_keys: tuple[str, ...],
verbose: int,
with_debugger: bool,
) -> None:
@@ -10,7 +10,6 @@
import sys
import time
from pathlib import Path
-from typing import Optional, Tuple, Union

import click
from loguru import logger
@@ -30,7 +29,7 @@ def running_from_cluster() -> bool:


def check_for_existing(
-output_dir: Path, location: str, append: bool, replace_keys: Tuple
+output_dir: Path, location: str, append: bool, replace_keys: tuple
) -> None:
existing_artifacts = set(
[
@@ -66,18 +65,18 @@ def check_for_existing(


def build_single(
-location: str, years: Optional[str], output_dir: str, replace_keys: Tuple
+location: str, years: str | None, output_dir: str, replace_keys: tuple
) -> None:
path = Path(output_dir) / f"{sanitize_location(location)}.hdf"
build_single_location_artifact(path, location, years, replace_keys)


def build_artifacts(
location: str,
-years: Optional[str],
+years: str | None,
output_dir: str,
append: bool,
-replace_keys: Tuple,
+replace_keys: tuple,
verbose: int,
) -> None:
"""Main application function for building artifacts.
@@ -196,10 +195,10 @@ def build_all_artifacts(output_dir: Path, verbose: int) -> None:


def build_single_location_artifact(
-path: Union[str, Path],
+path: str | Path,
location: str,
-years: Optional[str],
-replace_keys: Tuple = (),
+years: str | None,
+replace_keys: tuple = (),
log_to_file: bool = False,
) -> None:
"""Builds an artifact for a single location.
@@ -1,5 +1,4 @@
from pathlib import Path
-from typing import List, Tuple, Union

import click
import numpy as np
@@ -11,7 +10,7 @@

from {{cookiecutter.package_name}}.constants import metadata

-SeededDistribution = Tuple[str, stats.rv_continuous]
+SeededDistribution = tuple[str, stats.rv_continuous]


def len_longest_location() -> int:
@@ -42,7 +41,7 @@ def sanitize_location(location: str):
return location.replace(" ", "_").replace("'", "_").lower()


-def delete_if_exists(*paths: Union[Path, List[Path]], confirm=False):
+def delete_if_exists(*paths: Path | list[Path], confirm=False):
paths = paths[0] if isinstance(paths[0], list) else paths
existing_paths = [p for p in paths if p.exists()]
if existing_paths:
@@ -86,7 +85,7 @@ def read_data_by_draw(artifact_path: str, key : str, draw: int) -> pd.DataFrame:
def get_norm(
mean: float,
sd: float = None,
-ninety_five_pct_confidence_interval: Tuple[float, float] = None
+ninety_five_pct_confidence_interval: tuple[float, float] = None
) -> stats.norm:
sd = _get_standard_deviation(mean, sd, ninety_five_pct_confidence_interval)
return stats.norm(loc=mean, scale=sd)
@@ -95,7 +94,7 @@ def get_norm(
def get_truncnorm(
mean: float,
sd: float = None,
-ninety_five_pct_confidence_interval: Tuple[float, float] = None,
+ninety_five_pct_confidence_interval: tuple[float, float] = None,
lower_clip: float = 0.0,
upper_clip: float = 1.0
) -> stats.norm:
@@ -106,7 +105,7 @@ def get_truncnorm(


def _get_standard_deviation(
-mean: float, sd: float, ninety_five_pct_confidence_interval: Tuple[float, float]
+mean: float, sd: float, ninety_five_pct_confidence_interval: tuple[float, float]
) -> float:
if sd is None and ninety_five_pct_confidence_interval is None:
raise ValueError("Must provide either a standard deviation or a 95% confidence interval.")
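
For context on the guard above: when only the 95% interval is supplied, the conventional normal-theory conversion treats the interval as mean ± 1.96·sd. A sketch of that arithmetic (the template's exact computation sits outside this hunk):

    from scipy import stats

    def sd_from_95_ci(lower: float, upper: float) -> float:
        # a symmetric 95% normal interval spans 2 * 1.96 standard deviations
        return (upper - lower) / (2 * stats.norm.ppf(0.975))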
@@ -127,7 +126,7 @@ def _get_standard_deviation(


def get_lognorm_from_quantiles(median: float, lower: float, upper: float,
-quantiles: Tuple[float, float] = (0.025, 0.975)) -> stats.lognorm:
+quantiles: tuple[float, float] = (0.025, 0.975)) -> stats.lognorm:
"""Returns a frozen lognormal distribution with the specified median, such that
(lower, upper) are approximately equal to the quantiles with ranks
(quantile_ranks[0], quantile_ranks[1]).
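
One standard way to realize that docstring with scipy is to pin the scale at the median (the median of a lognormal equals its scale) and derive the log-space sigma from the quantile spread — a sketch of the technique, not necessarily the template's exact code:

    import numpy as np
    from scipy import stats

    def lognorm_from_quantiles(
        median: float,
        lower: float,
        upper: float,
        quantiles: tuple[float, float] = (0.025, 0.975),
    ) -> stats.lognorm:
        # log(X) ~ Normal(mu=log(median), sigma); choose sigma so the log-space
        # spread matches the quantile ranks (exact only when median is the
        # geometric mean of lower and upper, hence "approximately" above)
        z_lo, z_hi = stats.norm.ppf(quantiles)
        sigma = (np.log(upper) - np.log(lower)) / (z_hi - z_lo)
        return stats.lognorm(s=sigma, scale=median)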
