diff --git a/aepsych/benchmark/problem.py b/aepsych/benchmark/problem.py
index 299a56290..0fb00031c 100644
--- a/aepsych/benchmark/problem.py
+++ b/aepsych/benchmark/problem.py
@@ -9,6 +9,7 @@
 
 import aepsych
 import numpy as np
+import numpy.typing as npt
 import torch
 from scipy.stats import bernoulli, norm, pearsonr
 from aepsych.strategy import SequentialStrategy, Strategy
@@ -51,7 +52,7 @@ def metadata(self) -> Dict[str, Any]:
         Benchmark's output dataframe, with its associated value stored in each row."""
         return {"name": self.name}
 
-    def p(self, x: np.ndarray) -> np.ndarray:
+    def p(self, x: npt.NDArray) -> npt.NDArray:
         """Evaluate response probability from test function.
 
         Args:
@@ -62,7 +63,7 @@ def p(self, x: np.ndarray) -> np.ndarray:
         """
         return norm.cdf(self.f(x))
 
-    def sample_y(self, x: np.ndarray) -> np.ndarray:
+    def sample_y(self, x: npt.NDArray) -> npt.NDArray:
         """Sample a response from test function.
 
         Args:
@@ -86,7 +87,7 @@ def f_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:
         return f_hat
 
     @cached_property
-    def f_true(self) -> np.ndarray:
+    def f_true(self) -> npt.NDArray:
         """Evaluate true test function over evaluation grid.
 
         Returns:
@@ -238,7 +239,7 @@ def inverse_link(x):
         return inverse_link(self.thresholds).astype(np.float32)
 
     @cached_property
-    def true_below_threshold(self) -> np.ndarray:
+    def true_below_threshold(self) -> npt.NDArray:
         """
         Evaluate whether the true function is below threshold over the eval grid
         (used for proper scoring and threshold missclassification metric).
diff --git a/aepsych/benchmark/test_functions.py b/aepsych/benchmark/test_functions.py
index fcd73ff79..bf29fd9f8 100644
--- a/aepsych/benchmark/test_functions.py
+++ b/aepsych/benchmark/test_functions.py
@@ -10,6 +10,7 @@
 from typing import Callable
 
 import numpy as np
+import numpy.typing as npt
 import pandas as pd
 from scipy.interpolate import CubicSpline, interp1d
 from scipy.stats import norm
@@ -53,7 +54,7 @@
 dubno_data = pd.read_csv(io.StringIO(raw))
 
 
-def make_songetal_threshfun(x: np.ndarray, y: np.ndarray) -> Callable[[float], float]:
+def make_songetal_threshfun(x: npt.NDArray, y: npt.NDArray) -> Callable[[float], float]:
     """Generate a synthetic threshold function by interpolation of real data.
 
     Real data is from Dubno et al. 2013, and procedure follows Song et al. 2017, 2018.
@@ -83,7 +84,7 @@ def f_combo(x):
     return f_combo
 
 
 def make_songetal_testfun(
     phenotype: str = "Metabolic", beta: float = 1
-) -> Callable[[np.ndarray, bool], np.ndarray]:
+) -> Callable[[npt.NDArray, bool], npt.NDArray]:
     """Make an audiometric test function following Song et al. 2017.
 
     To do so,we first compute a threshold by interpolation/extrapolation
@@ -129,7 +130,7 @@ def song_testfun(x, cdf=False):
     return song_testfun
 
 
-def novel_discrimination_testfun(x: np.ndarray) -> np.ndarray:
+def novel_discrimination_testfun(x: npt.NDArray) -> npt.NDArray:
     """Evaluate novel discrimination test function from Owen et al.
 
     The threshold is roughly parabolic with context, and the slope
diff --git a/aepsych/config.py b/aepsych/config.py
index a6d04af3a..06e8ba2c2 100644
--- a/aepsych/config.py
+++ b/aepsych/config.py
@@ -16,6 +16,7 @@
 import botorch
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 
 from aepsych.version import __version__
@@ -175,7 +176,7 @@ def _str_to_list(self, v: str, element_type: _T = float) -> List[_T]:
         else:
             return [v.strip()]
 
-    def _str_to_array(self, v: str) -> np.ndarray:
+    def _str_to_array(self, v: str) -> npt.NDArray:
         v = ast.literal_eval(v)
         return np.array(v, dtype=float)
diff --git a/aepsych/generators/manual_generator.py b/aepsych/generators/manual_generator.py
index f794db2ab..c291108d4 100644
--- a/aepsych/generators/manual_generator.py
+++ b/aepsych/generators/manual_generator.py
@@ -9,6 +9,7 @@
 from typing import Optional, Union, Dict
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.generators.base import AEPsychGenerator
@@ -24,9 +25,9 @@ class ManualGenerator(AEPsychGenerator):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
-        points: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
+        points: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         shuffle: bool = True,
         seed: Optional[int] = None,
@@ -101,10 +102,10 @@ class SampleAroundPointsGenerator(ManualGenerator):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
-        window: Union[np.ndarray, torch.Tensor],
-        points: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
+        window: Union[npt.NDArray, torch.Tensor],
+        points: Union[npt.NDArray, torch.Tensor],
         samples_per_point: int,
         dim: Optional[int] = None,
         shuffle: bool = True,
diff --git a/aepsych/generators/random_generator.py b/aepsych/generators/random_generator.py
index 41acc8546..7dec19bbb 100644
--- a/aepsych/generators/random_generator.py
+++ b/aepsych/generators/random_generator.py
@@ -8,6 +8,7 @@
 from typing import Dict, Optional, Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.generators.base import AEPsychGenerator
@@ -22,8 +23,8 @@ class RandomGenerator(AEPsychGenerator):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
     ):
         """Iniatialize RandomGenerator.
diff --git a/aepsych/generators/sobol_generator.py b/aepsych/generators/sobol_generator.py
index ce54150f3..31226f37f 100644
--- a/aepsych/generators/sobol_generator.py
+++ b/aepsych/generators/sobol_generator.py
@@ -9,6 +9,7 @@
 from typing import Dict, Optional, Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.generators.base import AEPsychGenerator
@@ -24,8 +25,8 @@ class SobolGenerator(AEPsychGenerator):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         seed: Optional[int] = None,
         stimuli_per_trial: int = 1,
diff --git a/aepsych/models/base.py b/aepsych/models/base.py
index 16d8c0cb1..3553cacd9 100644
--- a/aepsych/models/base.py
+++ b/aepsych/models/base.py
@@ -14,6 +14,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 
 from aepsych.config import Config, ConfigurableMixin
@@ -91,7 +92,7 @@ def _get_extremum(
         extremum_type: str,
         locked_dims: Optional[Mapping[int, List[float]]],
         n_samples=1000,
-    ) -> Tuple[float, np.ndarray]:
+    ) -> Tuple[float, npt.NDArray]:
         pass
 
     def dim_grid(self, gridsize: int = 30) -> torch.Tensor:
@@ -105,7 +106,7 @@ def update(
     ) -> None:
         pass
 
-    def p_below_threshold(self, x, f_thresh) -> np.ndarray:
+    def p_below_threshold(self, x, f_thresh) -> npt.NDArray:
         pass
@@ -216,7 +217,7 @@ def inv_query(
 
     def get_jnd(
         self: ModelProtocol,
-        grid: Optional[Union[np.ndarray, torch.Tensor]] = None,
+        grid: Optional[Union[npt.NDArray, torch.Tensor]] = None,
         cred_level: Optional[float] = None,
         intensity_dim: int = -1,
         confsamps: int = 500,
@@ -378,7 +379,7 @@ def _fit_mll(
         )
         return res
 
-    def p_below_threshold(self, x, f_thresh) -> np.ndarray:
+    def p_below_threshold(self, x, f_thresh) -> npt.NDArray:
         f, var = self.predict(x)
         f_thresh = f_thresh.reshape(-1, 1)
         f = f.reshape(1, -1)
diff --git a/aepsych/models/gp_classification.py b/aepsych/models/gp_classification.py
index 4d9e087df..8dfbbf28a 100644
--- a/aepsych/models/gp_classification.py
+++ b/aepsych/models/gp_classification.py
@@ -11,6 +11,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
@@ -47,8 +48,8 @@ class GPClassificationModel(AEPsychMixin, ApproximateGP):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
@@ -232,7 +233,7 @@ def fit(
         self._fit_mll(mll, **kwargs)
 
     def sample(
-        self, x: Union[torch.Tensor, np.ndarray], num_samples: int
+        self, x: Union[torch.Tensor, npt.NDArray], num_samples: int
     ) -> torch.Tensor:
         """Sample from underlying model.
@@ -247,7 +248,7 @@
         return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()
 
     def predict(
-        self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
+        self, x: Union[torch.Tensor, npt.NDArray], probability_space: bool = False
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """Query the model for posterior mean and variance.
@@ -288,7 +289,7 @@ def predict(
         return promote_0d(fmean), promote_0d(fvar)
 
     def predict_probability(
-        self, x: Union[torch.Tensor, np.ndarray]
+        self, x: Union[torch.Tensor, npt.NDArray]
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         return self.predict(x, probability_space=True)
@@ -304,8 +305,8 @@ class GPBetaRegressionModel(GPClassificationModel):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
diff --git a/aepsych/models/gp_regression.py b/aepsych/models/gp_regression.py
index bab7e0b22..bec016eb7 100644
--- a/aepsych/models/gp_regression.py
+++ b/aepsych/models/gp_regression.py
@@ -11,6 +11,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
@@ -33,8 +34,8 @@ class GPRegressionModel(AEPsychMixin, ExactGP):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
@@ -140,7 +141,7 @@ def fit(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs) -> None:
         return self._fit_mll(mll, **kwargs)
 
     def sample(
-        self, x: Union[torch.Tensor, np.ndarray], num_samples: int
+        self, x: Union[torch.Tensor, npt.NDArray], num_samples: int
     ) -> torch.Tensor:
         """Sample from underlying model.
@@ -159,7 +160,7 @@ def update(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs):
         return self.fit(train_x, train_y, **kwargs)
 
     def predict(
-        self, x: Union[torch.Tensor, np.ndarray], **kwargs
+        self, x: Union[torch.Tensor, npt.NDArray], **kwargs
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """Query the model for posterior mean and variance.
diff --git a/aepsych/models/monotonic_projection_gp.py b/aepsych/models/monotonic_projection_gp.py
index f9d0f334d..0f375e2ae 100644
--- a/aepsych/models/monotonic_projection_gp.py
+++ b/aepsych/models/monotonic_projection_gp.py
@@ -11,6 +11,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
@@ -92,8 +93,8 @@ class MonotonicProjectionGP(GPClassificationModel):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         monotonic_dims: List[int],
         monotonic_grid_size: int = 20,
         min_f_val: Optional[float] = None,
@@ -135,7 +136,7 @@ def posterior(
         for i, dim in enumerate(self.monotonic_dims):
             # using numpy because torch doesn't support vectorized linspace,
             # pytorch/issues/61292
-            grid: Union[np.ndarray, torch.Tensor] = np.linspace(
+            grid: Union[npt.NDArray, torch.Tensor] = np.linspace(
                 self.lb[dim],
                 X[:, dim].numpy(),
                 s + 1,
@@ -167,7 +168,7 @@ def posterior(
         return GPyTorchPosterior(mvn_proj)
 
     def sample(
-        self, x: Union[torch.Tensor, np.ndarray], num_samples: int
+        self, x: Union[torch.Tensor, npt.NDArray], num_samples: int
     ) -> torch.Tensor:
         samps = super().sample(x=x, num_samples=num_samples)
         if self.min_f_val is not None:
diff --git a/aepsych/models/monotonic_rejection_gp.py b/aepsych/models/monotonic_rejection_gp.py
index 21aee95c2..2a91b088d 100644
--- a/aepsych/models/monotonic_rejection_gp.py
+++ b/aepsych/models/monotonic_rejection_gp.py
@@ -12,6 +12,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.acquisition.rejection_sampler import RejectionSampler
 from aepsych.config import Config
@@ -52,8 +53,8 @@ class MonotonicRejectionGP(AEPsychMixin, ApproximateGP):
 
     def __init__(
         self,
         monotonic_idxs: Sequence[int],
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         mean_module: Optional[Mean] = None,
         covar_module: Optional[Kernel] = None,
@@ -280,7 +281,7 @@ def predict(
         return mean, variance
 
     def predict_probability(
-        self, x: Union[torch.Tensor, np.ndarray]
+        self, x: Union[torch.Tensor, npt.NDArray]
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         return self.predict(x, probability_space=True)
diff --git a/aepsych/models/pairwise_probit.py b/aepsych/models/pairwise_probit.py
index 1fc192d58..571229c65 100644
--- a/aepsych/models/pairwise_probit.py
+++ b/aepsych/models/pairwise_probit.py
@@ -9,6 +9,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.config import Config
 from aepsych.factory import default_mean_covar_factory
@@ -57,8 +58,8 @@ def _get_index_of_equal_row(arr, x, dim=0):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         max_fit_time: Optional[float] = None,
diff --git a/aepsych/models/semi_p.py b/aepsych/models/semi_p.py
index a70b2acae..c8d6b7382 100644
--- a/aepsych/models/semi_p.py
+++ b/aepsych/models/semi_p.py
@@ -12,6 +12,7 @@
 
 import gpytorch
 import numpy as np
+import numpy.typing as npt
 import torch
 
 from aepsych.acquisition.objective import FloorLogitObjective
@@ -173,8 +174,8 @@ class SemiParametricGPModel(GPClassificationModel):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         stim_dim: int = 0,
         mean_module: Optional[gpytorch.means.Mean] = None,
@@ -332,7 +333,7 @@ def fit(
 
     def sample(
         self,
-        x: Union[torch.Tensor, np.ndarray],
+        x: Union[torch.Tensor, npt.NDArray],
         num_samples: int,
         probability_space=False,
     ) -> torch.Tensor:
@@ -356,7 +357,7 @@ def sample(
         return samps.squeeze(1)
 
     def predict(
-        self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
+        self, x: Union[torch.Tensor, npt.NDArray], probability_space: bool = False
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """Query the model for posterior mean and variance.
@@ -418,8 +419,8 @@ class HadamardSemiPModel(GPClassificationModel):
 
     def __init__(
         self,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         dim: Optional[int] = None,
         stim_dim: int = 0,
         slope_mean_module: Optional[gpytorch.means.Mean] = None,
@@ -604,7 +605,7 @@ def from_config(cls, config: Config) -> HadamardSemiPModel:
         )
 
     def predict(
-        self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
+        self, x: Union[torch.Tensor, npt.NDArray], probability_space: bool = False
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         """Query the model for posterior mean and variance.
diff --git a/aepsych/models/utils.py b/aepsych/models/utils.py
index 874845f49..92192bce9 100644
--- a/aepsych/models/utils.py
+++ b/aepsych/models/utils.py
@@ -11,6 +11,7 @@
 from typing import List, Mapping, Optional, Tuple, Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.acquisition import PosteriorMean
 from botorch.acquisition.objective import PosteriorTransform
@@ -55,7 +56,7 @@ def select_inducing_points(
     inducing_size: int,
     covar_module: Kernel = None,
    X: Optional[torch.Tensor] = None,
-    bounds: Optional[Union[torch.Tensor, np.ndarray]] = None,
+    bounds: Optional[Union[torch.Tensor, npt.NDArray]] = None,
     method: str = "auto",
 ):
     with torch.no_grad():
diff --git a/aepsych/strategy.py b/aepsych/strategy.py
index 704dd09fd..39ffa67f7 100644
--- a/aepsych/strategy.py
+++ b/aepsych/strategy.py
@@ -13,6 +13,7 @@
 from typing import List, Optional, Sequence, Tuple, Type, Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 
 from aepsych.config import Config
@@ -56,8 +57,8 @@ class Strategy(object):
 
     def __init__(
         self,
         generator: AEPsychGenerator,
-        lb: Union[np.ndarray, torch.Tensor],
-        ub: Union[np.ndarray, torch.Tensor],
+        lb: Union[npt.NDArray, torch.Tensor],
+        ub: Union[npt.NDArray, torch.Tensor],
         stimuli_per_trial: int,
         outcome_types: Sequence[Type[str]],
         dim: Optional[int] = None,
diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py
index 8d59ec276..678ec21c2 100644
--- a/tests/test_benchmark.py
+++ b/tests/test_benchmark.py
@@ -11,6 +11,7 @@
 import unittest
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from aepsych.benchmark import (
     Benchmark,
@@ -76,7 +77,7 @@ def setUp(self):
             lb=self.test_problem.lb, ub=self.test_problem.ub
         )
 
-    def unvectorized_p_below_threshold(self, x, f_thresh) -> np.ndarray:
+    def unvectorized_p_below_threshold(self, x, f_thresh) -> npt.NDArray:
         """this is the original p_below_threshold method in the AEPsychMixin that calculates
         model prediction of the probability of the stimulus being below a threshold for one
         single threshold"""