From 3f63afc81fe68ca259e11023ce2e85ef7ca23ef5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 22:07:58 +0000 Subject: [PATCH] deploy: 669738f46f47cdfb10db8b222510276d2ab00a7f --- .../aepsych/acquisition/lookahead.html | 41 +- .../aepsych/acquisition/lookahead/index.html | 41 +- api/_modules/aepsych/benchmark/benchmark.html | 1 + .../aepsych/benchmark/benchmark/index.html | 1 + .../aepsych/benchmark/example_problems.html | 12 +- .../benchmark/example_problems/index.html | 12 +- .../acqf_thompson_sampler_generator.html | 12 +- .../index.html | 12 +- .../generators/epsilon_greedy_generator.html | 18 +- .../epsilon_greedy_generator/index.html | 18 +- .../monotonic_rejection_generator.html | 15 +- .../monotonic_rejection_generator/index.html | 15 +- .../generators/optimize_acqf_generator.html | 41 +- .../optimize_acqf_generator/index.html | 41 +- .../aepsych/generators/sobol_generator.html | 1 - .../generators/sobol_generator/index.html | 1 - api/_modules/aepsych/models/base.html | 220 +----- api/_modules/aepsych/models/base/index.html | 220 +----- .../aepsych/models/gp_classification.html | 94 +-- .../models/gp_classification/index.html | 94 +-- .../aepsych/models/gp_regression.html | 27 +- .../aepsych/models/gp_regression/index.html | 27 +- .../models/inducing_point_allocators.html | 671 ------------------ .../inducing_point_allocators/index.html | 671 ------------------ .../models/monotonic_projection_gp.html | 35 +- .../models/monotonic_projection_gp/index.html | 35 +- .../models/monotonic_rejection_gp.html | 36 +- .../models/monotonic_rejection_gp/index.html | 36 +- .../aepsych/models/pairwise_probit.html | 4 +- .../aepsych/models/pairwise_probit/index.html | 4 +- api/_modules/aepsych/models/semi_p.html | 76 +- api/_modules/aepsych/models/semi_p/index.html | 76 +- api/_modules/aepsych/plotting.html | 1 - api/_modules/aepsych/plotting/index.html | 1 - api/_modules/aepsych/strategy.html | 72 +- api/_modules/aepsych/strategy/index.html | 72 +- api/_modules/aepsych/utils.html | 26 + api/_modules/aepsych/utils/index.html | 26 + api/_modules/index.html | 1 - api/acquisition.html | 145 ++-- api/acquisition/index.html | 145 ++-- api/config.html | 2 +- api/config/index.html | 2 +- api/generators.html | 68 +- api/generators/index.html | 68 +- api/genindex.html | 89 +-- api/models.html | 622 ++-------------- api/models/index.html | 622 ++-------------- api/utils.html | 18 + api/utils/index.html | 18 + demos/ParticleEffectDemo.html | 2 +- demos/ParticleEffectDemo/index.html | 2 +- demos/ThrowOptimizerDemo.html | 2 +- demos/ThrowOptimizerDemo/index.html | 2 +- demos/YannyLaurelDemo.html | 2 +- demos/YannyLaurelDemo/index.html | 2 +- js/searchindex.js | 2 +- .../data_collection_analysis_tutorial.html | 78 +- .../index.html | 78 +- 59 files changed, 1149 insertions(+), 3627 deletions(-) delete mode 100644 api/_modules/aepsych/models/inducing_point_allocators.html delete mode 100644 api/_modules/aepsych/models/inducing_point_allocators/index.html diff --git a/api/_modules/aepsych/acquisition/lookahead.html b/api/_modules/aepsych/acquisition/lookahead.html index 5aacf4d40..28876d0d4 100644 --- a/api/_modules/aepsych/acquisition/lookahead.html +++ b/api/_modules/aepsych/acquisition/lookahead.html @@ -34,6 +34,7 @@

Source code for aepsych.acquisition.lookahead

from botorch.models.gpytorch import GPyTorchModel from botorch.utils.transforms import t_batch_mode_transform from scipy.stats import norm +from torch import Tensor from .lookahead_utils import ( approximate_lookahead_levelset_at_xstar, @@ -263,6 +264,8 @@

Source code for aepsych.acquisition.lookahead

[docs]class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction): def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, @@ -274,14 +277,16 @@

Source code for aepsych.acquisition.lookahead

A global look-ahead acquisition function. Args: - model (GPyTorchModel): The gpytorch model to use. + lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq). + ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq). + model (GPyTorchModel): The gpytorch model. lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. - posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior. - query_set_size (int, optional): Number of points in the query set. - Xq (torch.Tensor, optional): (m x d) global reference set. + posterior_transform (PosteriorTransform, optional): Posterior transform to use. Defaults to None. + query_set_size (int, optional): Size of the query set. Defaults to 256. + Xq (Tensor, optional): (m x d) global reference set. Defaults to None. """ super().__init__(model=model, target=target, lookahead_type=lookahead_type) self.posterior_transform = posterior_transform @@ -300,7 +305,7 @@

Source code for aepsych.acquisition.lookahead

assert int(query_set_size) == query_set_size # make sure casting is safe # if the asserts above pass and Xq is None, query_set_size is not None so this is safe query_set_size = int(query_set_size) # cast - Xq = make_scaled_sobol(model.lb, model.ub, query_set_size) + Xq = make_scaled_sobol(lb, ub, query_set_size) self.register_buffer("Xq", Xq) @t_batch_mode_transform(expected_q=1) @@ -353,8 +358,10 @@

Source code for aepsych.acquisition.lookahead

[docs]class ApproxGlobalSUR(GlobalSUR): def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, - lookahead_type="levelset", + lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, query_set_size: Optional[int] = 256, Xq: Optional[torch.Tensor] = None, @@ -364,7 +371,9 @@

Source code for aepsych.acquisition.lookahead

Args: + lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq). + ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq). model (GPyTorchModel): The gpytorch model to use. - lookahed_type (str): The type of look-ahead to perform (default is "levelset"). + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. query_set_size (int, optional): Number of points in the query set. Xq (torch.Tensor, optional): (m x d) global reference set. @@ -373,6 +382,8 @@

Source code for aepsych.acquisition.lookahead

lookahead_type == "levelset" ), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!" super().__init__( + lb=lb, + ub=ub, model=model, target=target, lookahead_type=lookahead_type, @@ -449,8 +460,10 @@

Source code for aepsych.acquisition.lookahead

def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, - lookahead_type="posterior", + lookahead_type: Literal["levelset", "posterior"] = "posterior", target: Optional[float] = None, query_set_size: Optional[int] = 256, Xq: Optional[torch.Tensor] = None, @@ -458,7 +471,9 @@

Source code for aepsych.acquisition.lookahead

) -> None: """ model (GPyTorchModel): The gpytorch model to use. - lookahead_type (str): The type of look-ahead to perform (default is "posterior"). + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "posterior"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. Default is None. query_set_size (int, optional): Number of points in the query set. Default is 256. Xq (torch.Tensor, optional): (m x d) global reference set. Default is None. @@ -466,6 +481,8 @@

Source code for aepsych.acquisition.lookahead

""" super().__init__( + lb=lb, + ub=ub, model=model, target=target, lookahead_type=lookahead_type, @@ -548,7 +565,7 @@

Source code for aepsych.acquisition.lookahead

def construct_inputs_global_lookahead( model: GPyTorchModel, training_data: None, - lookahead_type="levelset", + lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, posterior_transform: Optional[PosteriorTransform] = None, query_set_size: Optional[int] = 256, @@ -561,7 +578,9 @@

Source code for aepsych.acquisition.lookahead

Args: model (GPyTorchModel): The gpytorch model to use. training_data (None): Placeholder for compatibility; not used in this function. - lookahead_type (str): Type of look-ahead to perform. Default is "levelset". + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Target threshold value in probability space. Default is None. posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior. Default is None. query_set_size (int, optional): Number of points in the query set. Default is 256. diff --git a/api/_modules/aepsych/acquisition/lookahead/index.html b/api/_modules/aepsych/acquisition/lookahead/index.html index 5aacf4d40..28876d0d4 100644 --- a/api/_modules/aepsych/acquisition/lookahead/index.html +++ b/api/_modules/aepsych/acquisition/lookahead/index.html @@ -34,6 +34,7 @@

Source code for aepsych.acquisition.lookahead

from botorch.models.gpytorch import GPyTorchModel from botorch.utils.transforms import t_batch_mode_transform from scipy.stats import norm +from torch import Tensor from .lookahead_utils import ( approximate_lookahead_levelset_at_xstar, @@ -263,6 +264,8 @@

Source code for aepsych.acquisition.lookahead

[docs]class GlobalLookaheadAcquisitionFunction(LookaheadAcquisitionFunction): def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, @@ -274,14 +277,16 @@

Source code for aepsych.acquisition.lookahead

A global look-ahead acquisition function. Args: - model (GPyTorchModel): The gpytorch model to use. + lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq). + ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq). + model (GPyTorchModel): The gpytorch model. lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. - posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior. - query_set_size (int, optional): Number of points in the query set. - Xq (torch.Tensor, optional): (m x d) global reference set. + posterior_transform (PosteriorTransform, optional): Posterior transform to use. Defaults to None. + query_set_size (int, optional): Size of the query set. Defaults to 256. + Xq (Tensor, optional): (m x d) global reference set. Defaults to None. """ super().__init__(model=model, target=target, lookahead_type=lookahead_type) self.posterior_transform = posterior_transform @@ -300,7 +305,7 @@

Source code for aepsych.acquisition.lookahead

assert int(query_set_size) == query_set_size # make sure casting is safe # if the asserts above pass and Xq is None, query_set_size is not None so this is safe query_set_size = int(query_set_size) # cast - Xq = make_scaled_sobol(model.lb, model.ub, query_set_size) + Xq = make_scaled_sobol(lb, ub, query_set_size) self.register_buffer("Xq", Xq) @t_batch_mode_transform(expected_q=1) @@ -353,8 +358,10 @@

Source code for aepsych.acquisition.lookahead

[docs]class ApproxGlobalSUR(GlobalSUR): def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, - lookahead_type="levelset", + lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, query_set_size: Optional[int] = 256, Xq: Optional[torch.Tensor] = None, @@ -364,7 +371,9 @@

Source code for aepsych.acquisition.lookahead

Args: + lb (Tensor): Lower bounds of the input space, used to generate the query set (Xq). + ub (Tensor): Upper bounds of the input space, used to generate the query set (Xq). model (GPyTorchModel): The gpytorch model to use. - lookahed_type (str): The type of look-ahead to perform (default is "levelset"). + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. query_set_size (int, optional): Number of points in the query set. Xq (torch.Tensor, optional): (m x d) global reference set. @@ -373,6 +382,8 @@

Source code for aepsych.acquisition.lookahead

lookahead_type == "levelset" ), f"ApproxGlobalSUR only supports lookahead on level set, got {lookahead_type}!" super().__init__( + lb=lb, + ub=ub, model=model, target=target, lookahead_type=lookahead_type, @@ -449,8 +460,10 @@

Source code for aepsych.acquisition.lookahead

def __init__( self, + lb: Tensor, + ub: Tensor, model: GPyTorchModel, - lookahead_type="posterior", + lookahead_type: Literal["levelset", "posterior"] = "posterior", target: Optional[float] = None, query_set_size: Optional[int] = 256, Xq: Optional[torch.Tensor] = None, @@ -458,7 +471,9 @@

Source code for aepsych.acquisition.lookahead

) -> None: """ model (GPyTorchModel): The gpytorch model to use. - lookahead_type (str): The type of look-ahead to perform (default is "posterior"). + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "posterior"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Threshold value to target in p-space. Default is None. query_set_size (int, optional): Number of points in the query set. Default is 256. Xq (torch.Tensor, optional): (m x d) global reference set. Default is None. @@ -466,6 +481,8 @@

Source code for aepsych.acquisition.lookahead

""" super().__init__( + lb=lb, + ub=ub, model=model, target=target, lookahead_type=lookahead_type, @@ -548,7 +565,7 @@

Source code for aepsych.acquisition.lookahead

def construct_inputs_global_lookahead( model: GPyTorchModel, training_data: None, - lookahead_type="levelset", + lookahead_type: Literal["levelset", "posterior"] = "levelset", target: Optional[float] = None, posterior_transform: Optional[PosteriorTransform] = None, query_set_size: Optional[int] = 256, @@ -561,7 +578,9 @@

Source code for aepsych.acquisition.lookahead

Args: model (GPyTorchModel): The gpytorch model to use. training_data (None): Placeholder for compatibility; not used in this function. - lookahead_type (str): Type of look-ahead to perform. Default is "levelset". + lookahead_type (Literal["levelset", "posterior"]): The type of look-ahead to perform (default is "levelset"). + - If the lookahead_type is "levelset", the acqf will consider the posterior probability that a point is above or below the target level set. + - If the lookahead_type is "posterior", the acqf will consider the posterior probability that a point will be detected or not. target (float, optional): Target threshold value in probability space. Default is None. posterior_transform (PosteriorTransform, optional): Optional transformation to apply to the posterior. Default is None. query_set_size (int, optional): Number of points in the query set. Default is 256. diff --git a/api/_modules/aepsych/benchmark/benchmark.html b/api/_modules/aepsych/benchmark/benchmark.html index 292fcf019..0b45a641a 100644 --- a/api/_modules/aepsych/benchmark/benchmark.html +++ b/api/_modules/aepsych/benchmark/benchmark.html @@ -173,6 +173,7 @@

Source code for aepsych.benchmark.benchmark

         np.random.seed(seed)
         config_dict["common"]["lb"] = str(problem.lb.tolist())
         config_dict["common"]["ub"] = str(problem.ub.tolist())
+        config_dict["common"]["dim"] = str(problem.lb.shape[0])
         config_dict["common"]["parnames"] = str(
             [f"par{i}" for i in range(len(problem.ub.tolist()))]
         )
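The hunk above now records the problem dimensionality in the generated benchmark config alongside the bounds. A minimal pure-Python sketch of the resulting fields, assuming a hypothetical 2-d problem (literal bounds stand in for problem.lb and problem.ub):

    lb = [0.0, 1.0]
    ub = [10.0, 20.0]
    config_common = {
        "lb": str(lb),
        "ub": str(ub),
        "dim": str(len(lb)),  # mirrors str(problem.lb.shape[0]) in the hunk above
        "parnames": str([f"par{i}" for i in range(len(ub))]),
    }
    print(config_common["dim"])  # "2"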
diff --git a/api/_modules/aepsych/benchmark/benchmark/index.html b/api/_modules/aepsych/benchmark/benchmark/index.html
index 292fcf019..0b45a641a 100644
--- a/api/_modules/aepsych/benchmark/benchmark/index.html
+++ b/api/_modules/aepsych/benchmark/benchmark/index.html
@@ -173,6 +173,7 @@ 

Source code for aepsych.benchmark.benchmark

         np.random.seed(seed)
         config_dict["common"]["lb"] = str(problem.lb.tolist())
         config_dict["common"]["ub"] = str(problem.ub.tolist())
+        config_dict["common"]["dim"] = str(problem.lb.shape[0])
         config_dict["common"]["parnames"] = str(
             [f"par{i}" for i in range(len(problem.ub.tolist()))]
         )
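The example-problem hunks that follow switch GPClassificationModel construction from explicit lb/ub bounds to a dimensionality argument, with the inducing-point allocator likewise built from dim. A minimal sketch of the new pattern (keyword names follow the diff; fitting data is omitted):

    from aepsych.models import GPClassificationModel
    from aepsych.models.inducing_points import KMeansAllocator

    inducing_size = 100
    m = GPClassificationModel(
        dim=6,
        inducing_size=inducing_size,
        inducing_point_method=KMeansAllocator(dim=6),
    )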
diff --git a/api/_modules/aepsych/benchmark/example_problems.html b/api/_modules/aepsych/benchmark/example_problems.html
index d83152ffb..936afccc0 100644
--- a/api/_modules/aepsych/benchmark/example_problems.html
+++ b/api/_modules/aepsych/benchmark/example_problems.html
@@ -18,7 +18,7 @@
 

Source code for aepsych.benchmark.example_problems

 # Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
 import os
-from typing import List, Optional, Union
+from typing import List, Union
 
 import numpy as np
 import torch
@@ -29,7 +29,7 @@ 

Source code for aepsych.benchmark.example_problems

novel_discrimination_testfun, ) from aepsych.models import GPClassificationModel -from aepsych.models.inducing_point_allocators import KMeansAllocator +from aepsych.models.inducing_points import KMeansAllocator """The DiscrimLowDim, DiscrimHighDim, ContrastSensitivity6d, and Hartmann6Binary classes are copied from bernoulli_lse github repository (https://github.com/facebookresearch/bernoulli_lse) @@ -122,13 +122,13 @@

Source code for aepsych.benchmark.example_problems

) y = torch.LongTensor(self.data[:, 0]) x = torch.Tensor(self.data[:, 1:]) + inducing_size = 100 # Fit a model, with a large number of inducing points self.m = GPClassificationModel( - lb=self.bounds[0], - ub=self.bounds[1], - inducing_size=100, - inducing_point_method=KMeansAllocator(bounds=self.bounds), + dim=6, + inducing_size=inducing_size, + inducing_point_method=KMeansAllocator(dim=6), ) self.m.fit( diff --git a/api/_modules/aepsych/benchmark/example_problems/index.html b/api/_modules/aepsych/benchmark/example_problems/index.html index d83152ffb..936afccc0 100644 --- a/api/_modules/aepsych/benchmark/example_problems/index.html +++ b/api/_modules/aepsych/benchmark/example_problems/index.html @@ -18,7 +18,7 @@

Source code for aepsych.benchmark.example_problems

 # Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
 import os
-from typing import List, Optional, Union
+from typing import List, Union
 
 import numpy as np
 import torch
@@ -29,7 +29,7 @@ 

Source code for aepsych.benchmark.example_problems

novel_discrimination_testfun, ) from aepsych.models import GPClassificationModel -from aepsych.models.inducing_point_allocators import KMeansAllocator +from aepsych.models.inducing_points import KMeansAllocator """The DiscrimLowDim, DiscrimHighDim, ContrastSensitivity6d, and Hartmann6Binary classes are copied from bernoulli_lse github repository (https://github.com/facebookresearch/bernoulli_lse) @@ -122,13 +122,13 @@

Source code for aepsych.benchmark.example_problems

) y = torch.LongTensor(self.data[:, 0]) x = torch.Tensor(self.data[:, 1:]) + inducing_size = 100 # Fit a model, with a large number of inducing points self.m = GPClassificationModel( - lb=self.bounds[0], - ub=self.bounds[1], - inducing_size=100, - inducing_point_method=KMeansAllocator(bounds=self.bounds), + dim=6, + inducing_size=inducing_size, + inducing_point_method=KMeansAllocator(dim=6), ) self.m.fit( diff --git a/api/_modules/aepsych/generators/acqf_thompson_sampler_generator.html b/api/_modules/aepsych/generators/acqf_thompson_sampler_generator.html index 7017658cc..43628ffd3 100644 --- a/api/_modules/aepsych/generators/acqf_thompson_sampler_generator.html +++ b/api/_modules/aepsych/generators/acqf_thompson_sampler_generator.html @@ -59,6 +59,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

def __init__( self, + lb: torch.Tensor, + ub: torch.Tensor, acqf: AcquisitionFunction, acqf_kwargs: Optional[Dict[str, Any]] = None, samps: int = 1000, @@ -66,6 +68,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

) -> None: """Initialize OptimizeAcqfGenerator. Args: + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf (AcquisitionFunction): Acquisition function to use. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to no arguments. @@ -79,6 +83,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

self.acqf_kwargs = acqf_kwargs self.samps = samps self.stimuli_per_trial = stimuli_per_trial + self.lb = lb + self.ub = ub def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction: """Instantiate the acquisition function with the model and any extra arguments. @@ -142,7 +148,7 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

starttime = time.time() seed = gen_options.get("seed") - bounds = torch.tensor(np.c_[model.lb, model.ub]).T.cpu() + bounds = torch.tensor(np.c_[self.lb, self.ub]).T.cpu() bounds_cpu = bounds.cpu() effective_dim = bounds.shape[-1] * num_points if effective_dim <= SobolEngine.MAXDIM: @@ -178,12 +184,16 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

AcqfThompsonSamplerGenerator: The initialized generator. """ classname = cls.__name__ + lb = config.gettensor(classname, "lb") + ub = config.gettensor(classname, "ub") acqf = config.getobj(classname, "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) stimuli_per_trial = config.getint(classname, "stimuli_per_trial") samps = config.getint(classname, "samps", fallback=1000) return cls( + lb=lb, + ub=ub, acqf=acqf, acqf_kwargs=extra_acqf_args, samps=samps, diff --git a/api/_modules/aepsych/generators/acqf_thompson_sampler_generator/index.html b/api/_modules/aepsych/generators/acqf_thompson_sampler_generator/index.html index 7017658cc..43628ffd3 100644 --- a/api/_modules/aepsych/generators/acqf_thompson_sampler_generator/index.html +++ b/api/_modules/aepsych/generators/acqf_thompson_sampler_generator/index.html @@ -59,6 +59,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

def __init__( self, + lb: torch.Tensor, + ub: torch.Tensor, acqf: AcquisitionFunction, acqf_kwargs: Optional[Dict[str, Any]] = None, samps: int = 1000, @@ -66,6 +68,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

) -> None: """Initialize OptimizeAcqfGenerator. Args: + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf (AcquisitionFunction): Acquisition function to use. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to no arguments. @@ -79,6 +83,8 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

self.acqf_kwargs = acqf_kwargs self.samps = samps self.stimuli_per_trial = stimuli_per_trial + self.lb = lb + self.ub = ub def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction: """Instantiate the acquisition function with the model and any extra arguments. @@ -142,7 +148,7 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

starttime = time.time() seed = gen_options.get("seed") - bounds = torch.tensor(np.c_[model.lb, model.ub]).T.cpu() + bounds = torch.tensor(np.c_[self.lb, self.ub]).T.cpu() bounds_cpu = bounds.cpu() effective_dim = bounds.shape[-1] * num_points if effective_dim <= SobolEngine.MAXDIM: @@ -178,12 +184,16 @@

Source code for aepsych.generators.acqf_thompson_sampler_generator

AcqfThompsonSamplerGenerator: The initialized generator. """ classname = cls.__name__ + lb = config.gettensor(classname, "lb") + ub = config.gettensor(classname, "ub") acqf = config.getobj(classname, "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) stimuli_per_trial = config.getint(classname, "stimuli_per_trial") samps = config.getint(classname, "samps", fallback=1000) return cls( + lb=lb, + ub=ub, acqf=acqf, acqf_kwargs=extra_acqf_args, samps=samps, diff --git a/api/_modules/aepsych/generators/epsilon_greedy_generator.html b/api/_modules/aepsych/generators/epsilon_greedy_generator.html index 0ba3c24dc..05f8bb0c6 100644 --- a/api/_modules/aepsych/generators/epsilon_greedy_generator.html +++ b/api/_modules/aepsych/generators/epsilon_greedy_generator.html @@ -33,15 +33,25 @@

Source code for aepsych.generators.epsilon_greedy_generator

[docs]class EpsilonGreedyGenerator(AEPsychGenerator): - def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1) -> None: + def __init__( + self, + lb: torch.Tensor, + ub: torch.Tensor, + subgenerator: AEPsychGenerator, + epsilon: float = 0.1, + ) -> None: """Initialize EpsilonGreedyGenerator. Args: + lb (torch.Tensor): Lower bounds for the random exploration samples. + ub (torch.Tensor): Upper bounds for the random exploration samples. subgenerator (AEPsychGenerator): The generator to use when not exploiting. epsilon (float): The probability of exploration. Defaults to 0.1. """ self.subgenerator = subgenerator self.epsilon = epsilon + self.lb = lb + self.ub = ub
[docs] @classmethod def from_config(cls, config: Config) -> "EpsilonGreedyGenerator": @@ -54,12 +64,14 @@

Source code for aepsych.generators.epsilon_greedy_generator

EpsilonGreedyGenerator: The generator. """ classname = cls.__name__ + lb = torch.tensor(config.getlist(classname, "lb")) + ub = torch.tensor(config.getlist(classname, "ub")) subgen_cls = config.getobj( classname, "subgenerator", fallback=OptimizeAcqfGenerator ) subgen = subgen_cls.from_config(config) epsilon = config.getfloat(classname, "epsilon", fallback=0.1) - return cls(subgenerator=subgen, epsilon=epsilon)
+ return cls(lb=lb, ub=ub, subgenerator=subgen, epsilon=epsilon)
[docs] def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor: """Query next point(s) to run by sampling from the subgenerator with probability 1-epsilon, and randomly otherwise. @@ -71,7 +83,7 @@

Source code for aepsych.generators.epsilon_greedy_generator

if num_points > 1: raise NotImplementedError("Epsilon-greedy batched gen is not implemented!") if np.random.uniform() < self.epsilon: - sample = np.random.uniform(low=model.lb, high=model.ub) + sample = np.random.uniform(low=self.lb, high=self.ub) return torch.tensor(sample).reshape(1, -1) else: return self.subgenerator.gen(num_points, model)
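Both hunks above move the exploration bounds onto the generator, so gen() samples from self.lb and self.ub instead of reaching into the model. A minimal sketch of the new constructor call; SobolGenerator is used as an illustrative subgenerator, and its lb/ub keywords are assumed since that module is not shown changing here:

    import torch
    from aepsych.generators.epsilon_greedy_generator import EpsilonGreedyGenerator
    from aepsych.generators.sobol_generator import SobolGenerator

    lb = torch.tensor([0.0, 0.0])
    ub = torch.tensor([1.0, 1.0])
    gen = EpsilonGreedyGenerator(
        lb=lb,
        ub=ub,
        subgenerator=SobolGenerator(lb=lb, ub=ub),
        epsilon=0.1,  # probability of drawing a uniform-random point
    )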
diff --git a/api/_modules/aepsych/generators/epsilon_greedy_generator/index.html b/api/_modules/aepsych/generators/epsilon_greedy_generator/index.html index 0ba3c24dc..05f8bb0c6 100644 --- a/api/_modules/aepsych/generators/epsilon_greedy_generator/index.html +++ b/api/_modules/aepsych/generators/epsilon_greedy_generator/index.html @@ -33,15 +33,25 @@

Source code for aepsych.generators.epsilon_greedy_generator

[docs]class EpsilonGreedyGenerator(AEPsychGenerator): - def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1) -> None: + def __init__( + self, + lb: torch.Tensor, + ub: torch.Tensor, + subgenerator: AEPsychGenerator, + epsilon: float = 0.1, + ) -> None: """Initialize EpsilonGreedyGenerator. Args: + lb (torch.Tensor): Lower bounds for the random exploration samples. + ub (torch.Tensor): Upper bounds for the random exploration samples. subgenerator (AEPsychGenerator): The generator to use when not exploiting. epsilon (float): The probability of exploration. Defaults to 0.1. """ self.subgenerator = subgenerator self.epsilon = epsilon + self.lb = lb + self.ub = ub
[docs] @classmethod def from_config(cls, config: Config) -> "EpsilonGreedyGenerator": @@ -54,12 +64,14 @@

Source code for aepsych.generators.epsilon_greedy_generator

EpsilonGreedyGenerator: The generator. """ classname = cls.__name__ + lb = torch.tensor(config.getlist(classname, "lb")) + ub = torch.tensor(config.getlist(classname, "ub")) subgen_cls = config.getobj( classname, "subgenerator", fallback=OptimizeAcqfGenerator ) subgen = subgen_cls.from_config(config) epsilon = config.getfloat(classname, "epsilon", fallback=0.1) - return cls(subgenerator=subgen, epsilon=epsilon)
+ return cls(lb=lb, ub=ub, subgenerator=subgen, epsilon=epsilon)
[docs] def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor: """Query next point(s) to run by sampling from the subgenerator with probability 1-epsilon, and randomly otherwise. @@ -71,7 +83,7 @@

Source code for aepsych.generators.epsilon_greedy_generator

if num_points > 1: raise NotImplementedError("Epsilon-greedy batched gen is not implemented!") if np.random.uniform() < self.epsilon: - sample = np.random.uniform(low=model.lb, high=model.ub) + sample = np.random.uniform(low=self.lb, high=self.ub) return torch.tensor(sample).reshape(1, -1) else: return self.subgenerator.gen(num_points, model)
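On the config side, from_config now reads lb and ub out of the generator's own section rather than off the model. An illustrative sketch of the options it parses; the values are placeholders, and the Config(config_str=...) keyword is an assumption about the Config API:

    from aepsych.config import Config

    ini = "\n".join(
        [
            "[EpsilonGreedyGenerator]",
            "lb = [0, 0]",
            "ub = [1, 1]",
            "epsilon = 0.1",
        ]
    )
    config = Config(config_str=ini)  # config_str keyword assumed
    # from_config then builds tensors via config.getlist(classname, "lb"), etc.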
diff --git a/api/_modules/aepsych/generators/monotonic_rejection_generator.html b/api/_modules/aepsych/generators/monotonic_rejection_generator.html index 2d85d9f62..11eb36bcb 100644 --- a/api/_modules/aepsych/generators/monotonic_rejection_generator.html +++ b/api/_modules/aepsych/generators/monotonic_rejection_generator.html @@ -30,6 +30,7 @@

Source code for aepsych.generators.monotonic_rejection_generator

from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP +from aepsych.utils import _process_bounds from botorch.acquisition import AcquisitionFunction from botorch.logging import logger from botorch.optim.initializers import gen_batch_initial_conditions @@ -61,13 +62,17 @@

Source code for aepsych.generators.monotonic_rejection_generator

def __init__( self, acqf: MonotonicMCAcquisition, + lb: torch.Tensor, + ub: torch.Tensor, acqf_kwargs: Optional[Dict[str, Any]] = None, model_gen_options: Optional[Dict[str, Any]] = None, explore_features: Optional[Sequence[int]] = None, ) -> None: """Initialize MonotonicRejectionGenerator. Args: acqf (MonotonicMCAcquisition): Acquisition function to use. + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to None. model_gen_options (Dict[str, Any], optional): Dictionary with options for generating candidates, such as @@ -81,6 +86,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

self.acqf_kwargs = acqf_kwargs self.model_gen_options = model_gen_options self.explore_features = explore_features + self.lb, self.ub, _ = _process_bounds(lb, ub, None) + self.bounds = torch.stack((self.lb, self.ub)) def _instantiate_acquisition_fn( self, model: MonotonicRejectionGP @@ -128,7 +135,7 @@

Source code for aepsych.generators.monotonic_rejection_generator

) # Augment bounds with deriv indicator - bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1) + bounds = torch.cat((self.bounds, torch.zeros(2, 1)), dim=1) # Fix deriv indicator to 0 during optimization fixed_features = {(bounds.shape[1] - 1): 0.0} # Fix explore features to random values @@ -210,6 +217,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

classname = cls.__name__ acqf = config.getobj("common", "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) + lb = torch.tensor(config.getlist(classname, "lb")) + ub = torch.tensor(config.getlist(classname, "ub")) options = {} options["num_restarts"] = config.getint(classname, "restarts", fallback=10) @@ -235,6 +244,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

return cls( acqf=acqf, + lb=lb, + ub=ub, acqf_kwargs=extra_acqf_args, model_gen_options=options, explore_features=explore_features, diff --git a/api/_modules/aepsych/generators/monotonic_rejection_generator/index.html b/api/_modules/aepsych/generators/monotonic_rejection_generator/index.html index 2d85d9f62..11eb36bcb 100644 --- a/api/_modules/aepsych/generators/monotonic_rejection_generator/index.html +++ b/api/_modules/aepsych/generators/monotonic_rejection_generator/index.html @@ -30,6 +30,7 @@

Source code for aepsych.generators.monotonic_rejection_generator

from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP +from aepsych.utils import _process_bounds from botorch.acquisition import AcquisitionFunction from botorch.logging import logger from botorch.optim.initializers import gen_batch_initial_conditions @@ -61,13 +62,17 @@

Source code for aepsych.generators.monotonic_rejection_generator

def __init__( self, acqf: MonotonicMCAcquisition, + lb: torch.Tensor, + ub: torch.Tensor, acqf_kwargs: Optional[Dict[str, Any]] = None, model_gen_options: Optional[Dict[str, Any]] = None, explore_features: Optional[Sequence[int]] = None, ) -> None: """Initialize MonotonicRejectionGenerator. Args: acqf (MonotonicMCAcquisition): Acquisition function to use. + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to None. model_gen_options (Dict[str, Any], optional): Dictionary with options for generating candidates, such as @@ -81,6 +86,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

self.acqf_kwargs = acqf_kwargs self.model_gen_options = model_gen_options self.explore_features = explore_features + self.lb, self.ub, _ = _process_bounds(lb, ub, None) + self.bounds = torch.stack((self.lb, self.ub)) def _instantiate_acquisition_fn( self, model: MonotonicRejectionGP @@ -128,7 +135,7 @@

Source code for aepsych.generators.monotonic_rejection_generator

) # Augment bounds with deriv indicator - bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1) + bounds = torch.cat((self.bounds, torch.zeros(2, 1)), dim=1) # Fix deriv indicator to 0 during optimization fixed_features = {(bounds.shape[1] - 1): 0.0} # Fix explore features to random values @@ -210,6 +217,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

classname = cls.__name__ acqf = config.getobj("common", "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) + lb = torch.tensor(config.getlist(classname, "lb")) + ub = torch.tensor(config.getlist(classname, "ub")) options = {} options["num_restarts"] = config.getint(classname, "restarts", fallback=10) @@ -235,6 +244,8 @@

Source code for aepsych.generators.monotonic_rejection_generator

return cls( acqf=acqf, + lb=lb, + ub=ub, acqf_kwargs=extra_acqf_args, model_gen_options=options, explore_features=explore_features, diff --git a/api/_modules/aepsych/generators/optimize_acqf_generator.html b/api/_modules/aepsych/generators/optimize_acqf_generator.html index b4587c340..b1c86c43d 100644 --- a/api/_modules/aepsych/generators/optimize_acqf_generator.html +++ b/api/_modules/aepsych/generators/optimize_acqf_generator.html @@ -23,10 +23,10 @@

Source code for aepsych.generators.optimize_acqf_generator

# LICENSE file in the root directory of this source tree. from __future__ import annotations +import inspect import time from typing import Any, Dict, Optional -import numpy as np import torch from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator @@ -57,6 +57,8 @@

Source code for aepsych.generators.optimize_acqf_generator

def __init__( self, + lb: torch.Tensor, + ub: torch.Tensor, acqf: AcquisitionFunction, acqf_kwargs: Optional[Dict[str, Any]] = None, restarts: int = 10, @@ -66,6 +68,8 @@

Source code for aepsych.generators.optimize_acqf_generator

) -> None: """Initialize OptimizeAcqfGenerator. Args: + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf (AcquisitionFunction): Acquisition function to use. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to no arguments. @@ -83,6 +87,8 @@

Source code for aepsych.generators.optimize_acqf_generator

self.samps = samps self.max_gen_time = max_gen_time self.stimuli_per_trial = stimuli_per_trial + self.lb = lb + self.ub = ub def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction: """ @@ -94,14 +100,33 @@

Source code for aepsych.generators.optimize_acqf_generator

Returns: AcquisitionFunction: Configured acquisition function. """ + if ( + "lb" in inspect.signature(self.acqf).parameters + and "ub" in inspect.signature(self.acqf).parameters + ): + if self.acqf == AnalyticExpectedUtilityOfBestOption: + return self.acqf(pref_model=model, lb=self.lb, ub=self.ub) + + self.lb = self.lb.to(model.device) + self.ub = self.ub.to(model.device) + if self.acqf in self.baseline_requiring_acqfs: + return self.acqf( + model, + model.train_inputs[0], + lb=self.lb, + ub=self.ub, + **self.acqf_kwargs, + ) + + return self.acqf(model=model, lb=self.lb, ub=self.ub, **self.acqf_kwargs) if self.acqf == AnalyticExpectedUtilityOfBestOption: return self.acqf(pref_model=model) if self.acqf in self.baseline_requiring_acqfs: return self.acqf(model, model.train_inputs[0], **self.acqf_kwargs) - else: - return self.acqf(model=model, **self.acqf_kwargs) + + return self.acqf(model=model, **self.acqf_kwargs)
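The rewritten _instantiate_acquisition_fn above keys its dispatch on whether the acquisition class accepts bounds. A reduced sketch of that signature check; wants_bounds is a hypothetical helper, not part of the patch:

    import inspect

    def wants_bounds(acqf_cls) -> bool:
        # inspect.signature on a class reflects its __init__ parameters,
        # which is the check the patched method performs before passing lb/ub.
        params = inspect.signature(acqf_cls).parameters
        return "lb" in params and "ub" in params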
[docs] def gen(self, num_points: int, model: ModelProtocol, **gen_options) -> torch.Tensor: """Query next point(s) to run by optimizing the acquisition function. @@ -142,12 +167,16 @@

Source code for aepsych.generators.optimize_acqf_generator

model.eval() # type: ignore acqf = self._instantiate_acquisition_fn(model) + if hasattr(model, "device"): + self.lb = self.lb.to(model.device) + self.ub = self.ub.to(model.device) + logger.info("Starting gen...") starttime = time.time() new_candidate, _ = optimize_acqf( acq_function=acqf, - bounds=torch.stack([model.lb, model.ub]), + bounds=torch.stack([self.lb, self.ub]), q=num_points, num_restarts=self.restarts, raw_samples=self.samps, @@ -171,6 +200,8 @@

Source code for aepsych.generators.optimize_acqf_generator

restart and sample parameters, maximum generation time, and stimuli per trial. """ classname = cls.__name__ + lb = config.gettensor(classname, "lb") + ub = config.gettensor(classname, "ub") acqf = config.getobj(classname, "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) stimuli_per_trial = config.getint(classname, "stimuli_per_trial") @@ -179,6 +210,8 @@

Source code for aepsych.generators.optimize_acqf_generator

max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None) return cls( + lb=lb, + ub=ub, acqf=acqf, acqf_kwargs=extra_acqf_args, restarts=restarts, diff --git a/api/_modules/aepsych/generators/optimize_acqf_generator/index.html b/api/_modules/aepsych/generators/optimize_acqf_generator/index.html index b4587c340..b1c86c43d 100644 --- a/api/_modules/aepsych/generators/optimize_acqf_generator/index.html +++ b/api/_modules/aepsych/generators/optimize_acqf_generator/index.html @@ -23,10 +23,10 @@

Source code for aepsych.generators.optimize_acqf_generator

# LICENSE file in the root directory of this source tree. from __future__ import annotations +import inspect import time from typing import Any, Dict, Optional -import numpy as np import torch from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator @@ -57,6 +57,8 @@

Source code for aepsych.generators.optimize_acqf_generator

def __init__( self, + lb: torch.Tensor, + ub: torch.Tensor, acqf: AcquisitionFunction, acqf_kwargs: Optional[Dict[str, Any]] = None, restarts: int = 10, @@ -66,6 +68,8 @@

Source code for aepsych.generators.optimize_acqf_generator

) -> None: """Initialize OptimizeAcqfGenerator. Args: + lb (torch.Tensor): Lower bounds for the optimization. + ub (torch.Tensor): Upper bounds for the optimization. acqf (AcquisitionFunction): Acquisition function to use. acqf_kwargs (Dict[str, object], optional): Extra arguments to pass to acquisition function. Defaults to no arguments. @@ -83,6 +87,8 @@

Source code for aepsych.generators.optimize_acqf_generator

self.samps = samps self.max_gen_time = max_gen_time self.stimuli_per_trial = stimuli_per_trial + self.lb = lb + self.ub = ub def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction: """ @@ -94,14 +100,33 @@

Source code for aepsych.generators.optimize_acqf_generator

Returns: AcquisitionFunction: Configured acquisition function. """ + if ( + "lb" in inspect.signature(self.acqf).parameters + and "ub" in inspect.signature(self.acqf).parameters + ): + if self.acqf == AnalyticExpectedUtilityOfBestOption: + return self.acqf(pref_model=model, lb=self.lb, ub=self.ub) + + self.lb = self.lb.to(model.device) + self.ub = self.ub.to(model.device) + if self.acqf in self.baseline_requiring_acqfs: + return self.acqf( + model, + model.train_inputs[0], + lb=self.lb, + ub=self.ub, + **self.acqf_kwargs, + ) + + return self.acqf(model=model, lb=self.lb, ub=self.ub, **self.acqf_kwargs) if self.acqf == AnalyticExpectedUtilityOfBestOption: return self.acqf(pref_model=model) if self.acqf in self.baseline_requiring_acqfs: return self.acqf(model, model.train_inputs[0], **self.acqf_kwargs) - else: - return self.acqf(model=model, **self.acqf_kwargs) + + return self.acqf(model=model, **self.acqf_kwargs)
[docs] def gen(self, num_points: int, model: ModelProtocol, **gen_options) -> torch.Tensor: """Query next point(s) to run by optimizing the acquisition function. @@ -142,12 +167,16 @@

Source code for aepsych.generators.optimize_acqf_generator

model.eval() # type: ignore acqf = self._instantiate_acquisition_fn(model) + if hasattr(model, "device"): + self.lb = self.lb.to(model.device) + self.ub = self.ub.to(model.device) + logger.info("Starting gen...") starttime = time.time() new_candidate, _ = optimize_acqf( acq_function=acqf, - bounds=torch.stack([model.lb, model.ub]), + bounds=torch.stack([self.lb, self.ub]), q=num_points, num_restarts=self.restarts, raw_samples=self.samps, @@ -171,6 +200,8 @@

Source code for aepsych.generators.optimize_acqf_generator

restart and sample parameters, maximum generation time, and stimuli per trial. """ classname = cls.__name__ + lb = config.gettensor(classname, "lb") + ub = config.gettensor(classname, "ub") acqf = config.getobj(classname, "acqf", fallback=None) extra_acqf_args = cls._get_acqf_options(acqf, config) stimuli_per_trial = config.getint(classname, "stimuli_per_trial") @@ -179,6 +210,8 @@

Source code for aepsych.generators.optimize_acqf_generator

max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None) return cls( + lb=lb, + ub=ub, acqf=acqf, acqf_kwargs=extra_acqf_args, restarts=restarts, diff --git a/api/_modules/aepsych/generators/sobol_generator.html b/api/_modules/aepsych/generators/sobol_generator.html index 3178f3708..393975c70 100644 --- a/api/_modules/aepsych/generators/sobol_generator.html +++ b/api/_modules/aepsych/generators/sobol_generator.html @@ -26,7 +26,6 @@

Source code for aepsych.generators.sobol_generator

from typing import Optional -import numpy as np import torch from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator diff --git a/api/_modules/aepsych/generators/sobol_generator/index.html b/api/_modules/aepsych/generators/sobol_generator/index.html index 3178f3708..393975c70 100644 --- a/api/_modules/aepsych/generators/sobol_generator/index.html +++ b/api/_modules/aepsych/generators/sobol_generator/index.html @@ -26,7 +26,6 @@

Source code for aepsych.generators.sobol_generator

from typing import Optional -import numpy as np import torch from aepsych.config import Config from aepsych.generators.base import AEPsychGenerator diff --git a/api/_modules/aepsych/models/base.html b/api/_modules/aepsych/models/base.html index 287d4d88f..ff15caef4 100644 --- a/api/_modules/aepsych/models/base.html +++ b/api/_modules/aepsych/models/base.html @@ -24,25 +24,19 @@

Source code for aepsych.models.base

 # LICENSE file in the root directory of this source tree.
 from __future__ import annotations
 
-import abc
 import time
 from collections.abc import Iterable
 from copy import deepcopy
-from typing import Any, Callable, Dict, List, Mapping, Optional, Protocol, Tuple, Union
+from typing import Any, Callable, Dict, List, Mapping, Optional, Protocol, Tuple
 
 import gpytorch
-import numpy as np
 import torch
-from aepsych.config import Config, ConfigurableMixin
-from aepsych.models.utils import get_extremum, inv_query
-from aepsych.utils import dim_grid, get_jnd_multid, make_scaled_sobol, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.fit import fit_gpytorch_mll, fit_gpytorch_mll_scipy
 from botorch.models.gpytorch import GPyTorchModel
 from botorch.posteriors import GPyTorchPosterior
 from gpytorch.likelihoods import Likelihood
 from gpytorch.mlls import MarginalLogLikelihood
-from scipy.stats import norm
 
 logger = getLogger()
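The hunks below delete the model-level bounds property and the query helpers (get_max, get_min, inv_query, get_jnd, dim_grid); per the generator hunks earlier in this patch, callers now carry their own bounds. A one-line sketch of the replacement pattern:

    import torch

    lb = torch.tensor([0.0, 0.0])
    ub = torch.tensor([1.0, 1.0])
    bounds = torch.stack((lb, ub))  # shape (2, d): row 0 lower, row 1 upper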
 
@@ -137,218 +131,6 @@ 

Source code for aepsych.models.base

     train_inputs: Optional[Tuple[torch.Tensor]]
     train_targets: Optional[torch.Tensor]
 
-    @property
-    def bounds(self) -> torch.Tensor:
-        return torch.stack((self.lb, self.ub))
-
-
[docs] def get_max( - self: ModelProtocol, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - ) -> Tuple[float, torch.Tensor]: - """Return the maximum of the modeled function, subject to constraints - - Args: - locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the - max is along a slice of the full surface. Defaults to None. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the max and its location (argmax). - """ - locked_dims = locked_dims or {} - _, _arg = get_extremum( - self, "max", self.bounds, locked_dims, n_samples, max_time=max_time - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg) - else: - val, _ = self.predict(arg) - return float(val.item()), arg
- -
[docs] def get_min( - self: ModelProtocol, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - ) -> Tuple[float, torch.Tensor]: - """Return the minimum of the modeled function, subject to constraints - Args: - locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the - min is along a slice of the full surface. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the min and its location (argmin). - """ - locked_dims = locked_dims or {} - _, _arg = get_extremum( - self, "min", self.bounds, locked_dims, n_samples, max_time=max_time - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg) - else: - val, _ = self.predict(arg) - return float(val.item()), arg
- -
[docs] def inv_query( - self, - y: float, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - weights: Optional[torch.Tensor] = None, - ) -> Tuple[float, torch.Tensor]: - """Query the model inverse. - Return nearest x such that f(x) = queried y, and also return the - value of f at that point. - - Args: - y (float): Points at which to find the inverse. - locked_dims (Mapping[int, float], optional): Dimensions to fix, so that the - inverse is along a slice of the full surface. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. Defaults to 1000. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - weights (torch.Tensor, optional): Weights for the optimization. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the value of f - nearest to queried y and the x position of this value. - """ - _, _arg = inv_query( - self, - y=y, - bounds=self.bounds, - locked_dims=locked_dims, - probability_space=probability_space, - n_samples=n_samples, - max_time=max_time, - weights=weights, - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg.reshape(1, self.dim)) - else: - val, _ = self.predict(arg.reshape(1, self.dim)) - return float(val.item()), arg
- -
[docs] def get_jnd( - self: ModelProtocol, - grid: Optional[torch.Tensor] = None, - cred_level: Optional[float] = None, - intensity_dim: int = -1, - confsamps: int = 500, - method: str = "step", - ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: - """Calculate the JND. - - Note that JND can have multiple plausible definitions - outside of the linear case, so we provide options for how to compute it. - For method="step", we report how far one needs to go over in stimulus - space to move 1 unit up in latent space (this is a lot of people's - conventional understanding of the JND). - For method="taylor", we report the local derivative, which also maps to a - 1st-order Taylor expansion of the latent function. This is a formal - generalization of JND as defined in Weber's law. - Both definitions are equivalent for linear psychometric functions. - - Args: - grid (torch.Tensor, optional): Mesh grid over which to find the JND. - Defaults to a square grid of size as determined by aepsych.utils.dim_grid. - cred_level (float, optional): Credible level for computing an interval. - Defaults to None, computing no interval. - intensity_dim (int): Dimension over which to compute the JND. - Defaults to -1. - confsamps (int): Number of posterior samples to use for - computing the credible interval. Defaults to 500. - method (str): "taylor" or "step" method (see docstring). - Defaults to "step". - - Returns: - Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: either the - mean JND, or a median, lower, upper tuple of the JND posterior. - """ - if grid is None: - grid = self.dim_grid() - elif isinstance(grid, np.ndarray): - grid = torch.tensor(grid) - - # this is super awkward, back into intensity dim grid assuming a square grid - gridsize = int(grid.shape[0] ** (1 / grid.shape[1])) - coords = torch.linspace( - self.lb[intensity_dim].item(), self.ub[intensity_dim].item(), gridsize - ) - - if cred_level is None: - fmean, _ = self.predict(grid) - fmean = fmean.reshape(*[gridsize for i in range(self.dim)]) - - if method == "taylor": - return torch.tensor(1 / np.gradient(fmean, coords, axis=intensity_dim)) - elif method == "step": - return torch.clip( - get_jnd_multid( - fmean, - coords, - mono_dim=intensity_dim, - ), - 0, - np.inf, - ) - - alpha = 1 - cred_level # type: ignore - qlower = alpha / 2 - qupper = 1 - alpha / 2 - - fsamps = self.sample(grid, confsamps) - if method == "taylor": - jnds = torch.tensor( - 1 - / np.gradient( - fsamps.reshape(confsamps, *[gridsize for i in range(self.dim)]), - coords, - axis=intensity_dim, - ) - ) - elif method == "step": - samps = [s.reshape((gridsize,) * self.dim) for s in fsamps] - jnds = torch.stack( - [get_jnd_multid(s, coords, mono_dim=intensity_dim) for s in samps] - ) - else: - raise RuntimeError(f"Unknown method {method}!") - upper = torch.clip(torch.quantile(jnds, qupper, axis=0), 0, np.inf) # type: ignore - lower = torch.clip(torch.quantile(jnds, qlower, axis=0), 0, np.inf) # type: ignore - median = torch.clip(torch.quantile(jnds, 0.5, axis=0), 0, np.inf) # type: ignore - return median, lower, upper
- -
[docs] def dim_grid( - self: ModelProtocol, - gridsize: int = 30, - slice_dims: Optional[Mapping[int, float]] = None, - ) -> torch.Tensor: - """Generate a grid based on lower, upper, and dim. - - Args: - gridsize (int): Number of points in each dimension. Defaults to 30. - slice_dims (Mapping[int, float], optional): Dimensions to fix at a certain value. Defaults to None. - """ - return dim_grid(self.lb, self.ub, gridsize, slice_dims)
-
[docs] def set_train_data( self, inputs: Optional[torch.Tensor] = None, diff --git a/api/_modules/aepsych/models/base/index.html b/api/_modules/aepsych/models/base/index.html index 287d4d88f..ff15caef4 100644 --- a/api/_modules/aepsych/models/base/index.html +++ b/api/_modules/aepsych/models/base/index.html @@ -24,25 +24,19 @@

Source code for aepsych.models.base

 # LICENSE file in the root directory of this source tree.
 from __future__ import annotations
 
-import abc
 import time
 from collections.abc import Iterable
 from copy import deepcopy
-from typing import Any, Callable, Dict, List, Mapping, Optional, Protocol, Tuple, Union
+from typing import Any, Callable, Dict, List, Mapping, Optional, Protocol, Tuple
 
 import gpytorch
-import numpy as np
 import torch
-from aepsych.config import Config, ConfigurableMixin
-from aepsych.models.utils import get_extremum, inv_query
-from aepsych.utils import dim_grid, get_jnd_multid, make_scaled_sobol, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.fit import fit_gpytorch_mll, fit_gpytorch_mll_scipy
 from botorch.models.gpytorch import GPyTorchModel
 from botorch.posteriors import GPyTorchPosterior
 from gpytorch.likelihoods import Likelihood
 from gpytorch.mlls import MarginalLogLikelihood
-from scipy.stats import norm
 
 logger = getLogger()
 
@@ -137,218 +131,6 @@ 

Source code for aepsych.models.base

     train_inputs: Optional[Tuple[torch.Tensor]]
     train_targets: Optional[torch.Tensor]
 
-    @property
-    def bounds(self) -> torch.Tensor:
-        return torch.stack((self.lb, self.ub))
-
-
[docs] def get_max( - self: ModelProtocol, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - ) -> Tuple[float, torch.Tensor]: - """Return the maximum of the modeled function, subject to constraints - - Args: - locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the - max is along a slice of the full surface. Defaults to None. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the max and its location (argmax). - """ - locked_dims = locked_dims or {} - _, _arg = get_extremum( - self, "max", self.bounds, locked_dims, n_samples, max_time=max_time - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg) - else: - val, _ = self.predict(arg) - return float(val.item()), arg
- -
[docs] def get_min( - self: ModelProtocol, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - ) -> Tuple[float, torch.Tensor]: - """Return the minimum of the modeled function, subject to constraints - Args: - locked_dims (Mapping[int, List[float]], optional): Dimensions to fix, so that the - min is along a slice of the full surface. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the min and its location (argmin). - """ - locked_dims = locked_dims or {} - _, _arg = get_extremum( - self, "min", self.bounds, locked_dims, n_samples, max_time=max_time - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg) - else: - val, _ = self.predict(arg) - return float(val.item()), arg
- -
[docs] def inv_query( - self, - y: float, - locked_dims: Optional[Mapping[int, float]] = None, - probability_space: bool = False, - n_samples: int = 1000, - max_time: Optional[float] = None, - weights: Optional[torch.Tensor] = None, - ) -> Tuple[float, torch.Tensor]: - """Query the model inverse. - Return nearest x such that f(x) = queried y, and also return the - value of f at that point. - - Args: - y (float): Points at which to find the inverse. - locked_dims (Mapping[int, float], optional): Dimensions to fix, so that the - inverse is along a slice of the full surface. - probability_space (bool): Is y (and therefore the returned nearest_y) in - probability space instead of latent function space? Defaults to False. - n_samples (int): number of coarse grid points to sample for optimization estimate. Defaults to 1000. - max_time (float, optional): Maximum time to spend optimizing. Defaults to None. - weights (torch.Tensor, optional): Weights for the optimization. Defaults to None. - - Returns: - Tuple[float, torch.Tensor]: Tuple containing the value of f - nearest to queried y and the x position of this value. - """ - _, _arg = inv_query( - self, - y=y, - bounds=self.bounds, - locked_dims=locked_dims, - probability_space=probability_space, - n_samples=n_samples, - max_time=max_time, - weights=weights, - ) - arg = torch.tensor(_arg.reshape(1, self.dim)) - if probability_space: - val, _ = self.predict_probability(arg.reshape(1, self.dim)) - else: - val, _ = self.predict(arg.reshape(1, self.dim)) - return float(val.item()), arg
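[Editor's note] The removed get_max/get_min/inv_query helpers all followed the coarse-grid pattern their docstrings describe ("number of coarse grid points to sample for optimization estimate"). A minimal sketch of that pattern, assuming only a model exposing predict() over box bounds; the names here are illustrative, not AEPsych API:

import torch

def coarse_grid_max(model, lb, ub, n_samples=1000):
    # Sample coarse candidate points uniformly inside the box bounds.
    grid = lb + (ub - lb) * torch.rand(n_samples, lb.shape[0])
    fmean, _ = model.predict(grid)  # posterior mean at the candidates
    best = torch.argmax(fmean)
    # Return the max estimate and its location, mirroring the removed API.
    return fmean[best].item(), grid[best].unsqueeze(0)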
- -
[docs] def get_jnd( - self: ModelProtocol, - grid: Optional[torch.Tensor] = None, - cred_level: Optional[float] = None, - intensity_dim: int = -1, - confsamps: int = 500, - method: str = "step", - ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: - """Calculate the JND. - - Note that JND can have multiple plausible definitions - outside of the linear case, so we provide options for how to compute it. - For method="step", we report how far one needs to go over in stimulus - space to move 1 unit up in latent space (this is a lot of people's - conventional understanding of the JND). - For method="taylor", we report the local derivative, which also maps to a - 1st-order Taylor expansion of the latent function. This is a formal - generalization of JND as defined in Weber's law. - Both definitions are equivalent for linear psychometric functions. - - Args: - grid (torch.Tensor, optional): Mesh grid over which to find the JND. - Defaults to a square grid of size as determined by aepsych.utils.dim_grid. - cred_level (float, optional): Credible level for computing an interval. - Defaults to None, computing no interval. - intensity_dim (int): Dimension over which to compute the JND. - Defaults to -1. - confsamps (int): Number of posterior samples to use for - computing the credible interval. Defaults to 500. - method (str): "taylor" or "step" method (see docstring). - Defaults to "step". - - Returns: - Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: either the - mean JND, or a median, lower, upper tuple of the JND posterior. - """ - if grid is None: - grid = self.dim_grid() - elif isinstance(grid, np.ndarray): - grid = torch.tensor(grid) - - # this is super awkward, back into intensity dim grid assuming a square grid - gridsize = int(grid.shape[0] ** (1 / grid.shape[1])) - coords = torch.linspace( - self.lb[intensity_dim].item(), self.ub[intensity_dim].item(), gridsize - ) - - if cred_level is None: - fmean, _ = self.predict(grid) - fmean = fmean.reshape(*[gridsize for i in range(self.dim)]) - - if method == "taylor": - return torch.tensor(1 / np.gradient(fmean, coords, axis=intensity_dim)) - elif method == "step": - return torch.clip( - get_jnd_multid( - fmean, - coords, - mono_dim=intensity_dim, - ), - 0, - np.inf, - ) - - alpha = 1 - cred_level # type: ignore - qlower = alpha / 2 - qupper = 1 - alpha / 2 - - fsamps = self.sample(grid, confsamps) - if method == "taylor": - jnds = torch.tensor( - 1 - / np.gradient( - fsamps.reshape(confsamps, *[gridsize for i in range(self.dim)]), - coords, - axis=intensity_dim, - ) - ) - elif method == "step": - samps = [s.reshape((gridsize,) * self.dim) for s in fsamps] - jnds = torch.stack( - [get_jnd_multid(s, coords, mono_dim=intensity_dim) for s in samps] - ) - else: - raise RuntimeError(f"Unknown method {method}!") - upper = torch.clip(torch.quantile(jnds, qupper, axis=0), 0, np.inf) # type: ignore - lower = torch.clip(torch.quantile(jnds, qlower, axis=0), 0, np.inf) # type: ignore - median = torch.clip(torch.quantile(jnds, 0.5, axis=0), 0, np.inf) # type: ignore - return median, lower, upper
- -
[docs] def dim_grid( - self: ModelProtocol, - gridsize: int = 30, - slice_dims: Optional[Mapping[int, float]] = None, - ) -> torch.Tensor: - """Generate a grid based on lower, upper, and dim. - - Args: - gridsize (int): Number of points in each dimension. Defaults to 30. - slice_dims (Mapping[int, float], optional): Dimensions to fix at a certain value. Defaults to None. - """ - return dim_grid(self.lb, self.ub, gridsize, slice_dims)
-
[docs] def set_train_data( self, inputs: Optional[torch.Tensor] = None, diff --git a/api/_modules/aepsych/models/gp_classification.html b/api/_modules/aepsych/models/gp_classification.html index adba99465..d3d3e7d94 100644 --- a/api/_modules/aepsych/models/gp_classification.html +++ b/api/_modules/aepsych/models/gp_classification.html @@ -24,7 +24,6 @@

Source code for aepsych.models.gp_classification

# LICENSE file in the root directory of this source tree. from __future__ import annotations -import warnings from copy import deepcopy from typing import Any, Dict, Optional, Tuple @@ -34,16 +33,10 @@

Source code for aepsych.models.gp_classification

from aepsych.config import Config from aepsych.factory.default import default_mean_covar_factory from aepsych.models.base import AEPsychModelDeviceMixin -from aepsych.models.inducing_point_allocators import ( - AutoAllocator, - DummyAllocator, - KMeansAllocator, - SobolAllocator, -) -from aepsych.models.utils import select_inducing_points -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.models.inducing_points import GreedyVarianceReduction +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import get_dims, get_optimizer_options, promote_0d from aepsych.utils_logging import getLogger -from botorch.models.utils.inducing_point_allocators import InducingPointAllocator from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood from gpytorch.models import ApproximateGP from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy @@ -73,40 +66,35 @@

Source code for aepsych.models.gp_classification

def __init__( self, - lb: torch.Tensor, - ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, - dim: Optional[int] = None, + dim: int, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, - inducing_points: Optional[torch.Tensor] = None, ) -> None: """Initialize the GP Classification model Args: - lb (torch.Tensor): Lower bounds of the parameters. - ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method to use for selecting inducing points. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. + dim (int): The number of dimensions in the parameter space. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to Bernoulli likelihood. - inducing_size (int, optional): Number of inducing points. Defaults to 99. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): Number of inducing points. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B. """ - lb, ub, self.dim = _process_bounds(lb, ub, dim) + self.dim = dim self.max_fit_time = max_fit_time - self.inducing_size = inducing_size or 99 + self.inducing_size = inducing_size self.optimizer_options = ( {"options": optimizer_options} if optimizer_options else {"options": {}} @@ -129,15 +117,14 @@
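[Editor's note] A minimal construction sketch under the new signature shown in this hunk, assuming only what the diff itself shows: bounds are no longer passed to the model, dim is required, and the allocator argument is optional, defaulting to a GreedyVarianceReduction built from dim.

from aepsych.models.gp_classification import GPClassificationModel
from aepsych.models.inducing_points import GreedyVarianceReduction

model = GPClassificationModel(
    dim=2,  # dimensionality is now explicit; no lb/ub here
    inducing_size=100,  # the new default
    inducing_point_method=GreedyVarianceReduction(dim=2),  # same as omitting it
)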

Source code for aepsych.models.gp_classification

dim=self.dim, stimuli_per_trial=self.stimuli_per_trial ) - self.inducing_point_method = inducing_point_method - inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + self.inducing_point_method = inducing_point_method or GreedyVarianceReduction( + dim=self.dim + ) + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=covar_module or default_covar, ) - self.last_inducing_points_method = self.inducing_point_method.allocator_used - self.inducing_points = inducing_points variational_distribution = CholeskyVariationalDistribution( inducing_points.size(0), batch_shape=torch.Size([self._batch_size]) ).to(inducing_points) @@ -150,9 +137,6 @@

Source code for aepsych.models.gp_classification

) super().__init__(variational_strategy) - # Tensors need to be directly registered, Modules themselves can be assigned as attr - self.register_buffer("lb", lb) - self.register_buffer("ub", ub) self.likelihood = likelihood self.mean_module = mean_module or default_mean self.covar_module = covar_module or default_covar @@ -175,11 +159,11 @@

Source code for aepsych.models.gp_classification

""" classname = cls.__name__ - inducing_size = config.getint(classname, "inducing_size", fallback=None) + inducing_size = config.getint(classname, "inducing_size", fallback=100) - lb = config.gettensor(classname, "lb") - ub = config.gettensor(classname, "ub") dim = config.getint(classname, "dim", fallback=None) + if dim is None: + dim = get_dims(config) mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=default_mean_covar_factory @@ -189,7 +173,7 @@

Source code for aepsych.models.gp_classification

max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) inducing_point_method_class = config.getobj( - classname, "inducing_point_method", fallback=AutoAllocator + classname, "inducing_point_method", fallback=GreedyVarianceReduction ) # Check if allocator class has a `from_config` method if hasattr(inducing_point_method_class, "from_config"): @@ -210,8 +194,6 @@

Source code for aepsych.models.gp_classification

optimizer_options = get_optimizer_options(config, classname) return cls( - lb=lb, - ub=ub, dim=dim, inducing_size=inducing_size, mean_module=mean, @@ -238,14 +220,11 @@

Source code for aepsych.models.gp_classification

if self.train_inputs is not None: # remember original device device = self.device - inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=self.covar_module, - X=self.train_inputs[0], - bounds=self.bounds, + inputs=self.train_inputs[0], ).to(device) - self.last_inducing_points_method = self.inducing_point_method.allocator_used variational_distribution = CholeskyVariationalDistribution( inducing_points.size(0), batch_shape=torch.Size([self._batch_size]) ).to(device) @@ -282,8 +261,7 @@
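[Editor's note] A hedged sketch of the allocator API these hunks switch to: at construction time the allocator is called with just a size and a kernel; on refit it also receives the current training inputs. The exact fallback behavior when inputs are absent is assumed, not shown in this patch.

import torch
from gpytorch.kernels import RBFKernel, ScaleKernel
from aepsych.models.inducing_points import GreedyVarianceReduction

allocator = GreedyVarianceReduction(dim=2)
kernel = ScaleKernel(RBFKernel(ard_num_dims=2))

# Initial allocation, before any data (as in __init__ above).
initial = allocator.allocate_inducing_points(num_inducing=100, covar_module=kernel)

# Refit-time allocation from training inputs (as in _reset_variational_strategy).
X = torch.rand(50, 2)  # stand-in training inputs
refit = allocator.allocate_inducing_points(
    num_inducing=100, covar_module=kernel, inputs=X
)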

Source code for aepsych.models.gp_classification

self._reset_hyperparameters() if not warmstart_induc or ( - self.last_inducing_points_method == "DummyAllocator" - and self.inducing_point_method.__class__.__name__ != "DummyAllocator" + self.inducing_point_method.last_allocator_used is None ): self._reset_variational_strategy() @@ -381,40 +359,34 @@

Source code for aepsych.models.gp_classification

def __init__( self, - lb: torch.Tensor, - ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, - dim: Optional[int] = None, + dim: int, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: """Initialize the GP Beta Regression model Args: - lb (torch.Tensor): Lower bounds of the parameters. - ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method to use to select the inducing points. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. Defaults to None. + dim (int): The number of dimensions in the parameter space. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. Defaults to None. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to Beta likelihood. - inducing_size (int, optional): Number of inducing points. Defaults to 100. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): Number of inducing points. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None. """ if likelihood is None: likelihood = BetaLikelihood() - self.inducing_point_method = inducing_point_method + super().__init__( - lb=lb, - ub=ub, dim=dim, mean_module=mean_module, covar_module=covar_module, diff --git a/api/_modules/aepsych/models/gp_classification/index.html b/api/_modules/aepsych/models/gp_classification/index.html index adba99465..d3d3e7d94 100644 --- a/api/_modules/aepsych/models/gp_classification/index.html +++ b/api/_modules/aepsych/models/gp_classification/index.html @@ -24,7 +24,6 @@

Source code for aepsych.models.gp_classification

# LICENSE file in the root directory of this source tree. from __future__ import annotations -import warnings from copy import deepcopy from typing import Any, Dict, Optional, Tuple @@ -34,16 +33,10 @@

Source code for aepsych.models.gp_classification

from aepsych.config import Config from aepsych.factory.default import default_mean_covar_factory from aepsych.models.base import AEPsychModelDeviceMixin -from aepsych.models.inducing_point_allocators import ( - AutoAllocator, - DummyAllocator, - KMeansAllocator, - SobolAllocator, -) -from aepsych.models.utils import select_inducing_points -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.models.inducing_points import GreedyVarianceReduction +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import get_dims, get_optimizer_options, promote_0d from aepsych.utils_logging import getLogger -from botorch.models.utils.inducing_point_allocators import InducingPointAllocator from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood from gpytorch.models import ApproximateGP from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy @@ -73,40 +66,35 @@

Source code for aepsych.models.gp_classification

def __init__( self, - lb: torch.Tensor, - ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, - dim: Optional[int] = None, + dim: int, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, - inducing_points: Optional[torch.Tensor] = None, ) -> None: """Initialize the GP Classification model Args: - lb (torch.Tensor): Lower bounds of the parameters. - ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method to use for selecting inducing points. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. + dim (int): The number of dimensions in the parameter space. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to Bernoulli likelihood. - inducing_size (int, optional): Number of inducing points. Defaults to 99. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): Number of inducing points. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B. """ - lb, ub, self.dim = _process_bounds(lb, ub, dim) + self.dim = dim self.max_fit_time = max_fit_time - self.inducing_size = inducing_size or 99 + self.inducing_size = inducing_size self.optimizer_options = ( {"options": optimizer_options} if optimizer_options else {"options": {}} @@ -129,15 +117,14 @@

Source code for aepsych.models.gp_classification

dim=self.dim, stimuli_per_trial=self.stimuli_per_trial ) - self.inducing_point_method = inducing_point_method - inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + self.inducing_point_method = inducing_point_method or GreedyVarianceReduction( + dim=self.dim + ) + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=covar_module or default_covar, ) - self.last_inducing_points_method = self.inducing_point_method.allocator_used - self.inducing_points = inducing_points variational_distribution = CholeskyVariationalDistribution( inducing_points.size(0), batch_shape=torch.Size([self._batch_size]) ).to(inducing_points) @@ -150,9 +137,6 @@

Source code for aepsych.models.gp_classification

) super().__init__(variational_strategy) - # Tensors need to be directly registered, Modules themselves can be assigned as attr - self.register_buffer("lb", lb) - self.register_buffer("ub", ub) self.likelihood = likelihood self.mean_module = mean_module or default_mean self.covar_module = covar_module or default_covar @@ -175,11 +159,11 @@

Source code for aepsych.models.gp_classification

""" classname = cls.__name__ - inducing_size = config.getint(classname, "inducing_size", fallback=None) + inducing_size = config.getint(classname, "inducing_size", fallback=100) - lb = config.gettensor(classname, "lb") - ub = config.gettensor(classname, "ub") dim = config.getint(classname, "dim", fallback=None) + if dim is None: + dim = get_dims(config) mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=default_mean_covar_factory @@ -189,7 +173,7 @@

Source code for aepsych.models.gp_classification

max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) inducing_point_method_class = config.getobj( - classname, "inducing_point_method", fallback=AutoAllocator + classname, "inducing_point_method", fallback=GreedyVarianceReduction ) # Check if allocator class has a `from_config` method if hasattr(inducing_point_method_class, "from_config"): @@ -210,8 +194,6 @@

Source code for aepsych.models.gp_classification

optimizer_options = get_optimizer_options(config, classname) return cls( - lb=lb, - ub=ub, dim=dim, inducing_size=inducing_size, mean_module=mean, @@ -238,14 +220,11 @@

Source code for aepsych.models.gp_classification

if self.train_inputs is not None: # remember original device device = self.device - inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=self.covar_module, - X=self.train_inputs[0], - bounds=self.bounds, + inputs=self.train_inputs[0], ).to(device) - self.last_inducing_points_method = self.inducing_point_method.allocator_used variational_distribution = CholeskyVariationalDistribution( inducing_points.size(0), batch_shape=torch.Size([self._batch_size]) ).to(device) @@ -282,8 +261,7 @@

Source code for aepsych.models.gp_classification

self._reset_hyperparameters() if not warmstart_induc or ( - self.last_inducing_points_method == "DummyAllocator" - and self.inducing_point_method.__class__.__name__ != "DummyAllocator" + self.inducing_point_method.last_allocator_used is None ): self._reset_variational_strategy() @@ -381,40 +359,34 @@

Source code for aepsych.models.gp_classification

def __init__( self, - lb: torch.Tensor, - ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, - dim: Optional[int] = None, + dim: int, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: """Initialize the GP Beta Regression model Args: - lb (torch.Tensor): Lower bounds of the parameters. - ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method to use to select the inducing points. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. Defaults to None. + dim (int): The number of dimensions in the parameter space. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. Defaults to None. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to Beta likelihood. - inducing_size (int, optional): Number of inducing points. Defaults to 100. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): Number of inducing points. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None. """ if likelihood is None: likelihood = BetaLikelihood() - self.inducing_point_method = inducing_point_method + super().__init__( - lb=lb, - ub=ub, dim=dim, mean_module=mean_module, covar_module=covar_module, diff --git a/api/_modules/aepsych/models/gp_regression.html b/api/_modules/aepsych/models/gp_regression.html index abcc1f382..e227f4853 100644 --- a/api/_modules/aepsych/models/gp_regression.html +++ b/api/_modules/aepsych/models/gp_regression.html @@ -28,12 +28,11 @@

Source code for aepsych.models.gp_regression

from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
-import numpy as np
 import torch
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
-from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
+from aepsych.utils import get_dims, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import GaussianLikelihood, Likelihood
 from gpytorch.models import ExactGP
@@ -51,9 +50,7 @@ 

Source code for aepsych.models.gp_regression

def __init__(
         self,
-        lb: torch.Tensor,
-        ub: torch.Tensor,
-        dim: Optional[int] = None,
+        dim: int,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Likelihood] = None,
@@ -63,10 +60,7 @@ 

Source code for aepsych.models.gp_regression

        """Initialize the GP regression model
 
         Args:
-            lb (torch.Tensor): Lower bounds of the parameters.
-            ub (torch.Tensor): Upper bounds of the parameters.
-            dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
-                of lb and ub.
+            dim (int): The number of dimensions in the parameter space.
             mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
             covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
                 gamma prior.
@@ -77,12 +71,13 @@ 

Source code for aepsych.models.gp_regression

            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
                 fitting. Assumes we are using L-BFGS-B.
         """
+        self.dim = dim
+
         if likelihood is None:
             likelihood = GaussianLikelihood()
 
         super().__init__(None, None, likelihood)
 
-        lb, ub, self.dim = _process_bounds(lb, ub, dim)
         self.max_fit_time = max_fit_time
 
         self.optimizer_options = (
@@ -91,12 +86,10 @@ 

Source code for aepsych.models.gp_regression

if mean_module is None or covar_module is None:
             default_mean, default_covar = default_mean_covar_factory(
-                dim=self.dim, stimuli_per_trial=self.stimuli_per_trial
+                dim=self.dim,
+                stimuli_per_trial=self.stimuli_per_trial,
             )
 
-        # Tensors need to be directly registered, Modules themselves can be assigned as attr
-        self.register_buffer("lb", lb)
-        self.register_buffer("ub", ub)
         self.likelihood = likelihood
         self.mean_module = mean_module or default_mean
         self.covar_module = covar_module or default_covar
@@ -116,9 +109,9 @@ 

Source code for aepsych.models.gp_regression

        """
         classname = cls.__name__
 
-        lb = config.gettensor(classname, "lb")
-        ub = config.gettensor(classname, "ub")
         dim = config.getint(classname, "dim", fallback=None)
+        if dim is None:
+            dim = get_dims(config)
 
         mean_covar_factory = config.getobj(
             classname, "mean_covar_factory", fallback=default_mean_covar_factory
@@ -141,8 +134,6 @@ 

Source code for aepsych.models.gp_regression

optimizer_options = get_optimizer_options(config, classname)
 
         return {
-            "lb": lb,
-            "ub": ub,
             "dim": dim,
             "mean_module": mean,
             "covar_module": covar,
diff --git a/api/_modules/aepsych/models/gp_regression/index.html b/api/_modules/aepsych/models/gp_regression/index.html
index abcc1f382..e227f4853 100644
--- a/api/_modules/aepsych/models/gp_regression/index.html
+++ b/api/_modules/aepsych/models/gp_regression/index.html
@@ -28,12 +28,11 @@ 

Source code for aepsych.models.gp_regression

from typing import Any, Dict, Optional, Tuple
 
 import gpytorch
-import numpy as np
 import torch
 from aepsych.config import Config
 from aepsych.factory.default import default_mean_covar_factory
 from aepsych.models.base import AEPsychModelDeviceMixin
-from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
+from aepsych.utils import get_dims, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from gpytorch.likelihoods import GaussianLikelihood, Likelihood
 from gpytorch.models import ExactGP
@@ -51,9 +50,7 @@ 

Source code for aepsych.models.gp_regression

def __init__(
         self,
-        lb: torch.Tensor,
-        ub: torch.Tensor,
-        dim: Optional[int] = None,
+        dim: int,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Likelihood] = None,
@@ -63,10 +60,7 @@ 

Source code for aepsych.models.gp_regression

        """Initialize the GP regression model
 
         Args:
-            lb (torch.Tensor): Lower bounds of the parameters.
-            ub (torch.Tensor): Upper bounds of the parameters.
-            dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
-                of lb and ub.
+            dim (int): The number of dimensions in the parameter space.
             mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
             covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
                 gamma prior.
@@ -77,12 +71,13 @@ 

Source code for aepsych.models.gp_regression

            optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
                 fitting. Assumes we are using L-BFGS-B.
         """
+        self.dim = dim
+
         if likelihood is None:
             likelihood = GaussianLikelihood()
 
         super().__init__(None, None, likelihood)
 
-        lb, ub, self.dim = _process_bounds(lb, ub, dim)
         self.max_fit_time = max_fit_time
 
         self.optimizer_options = (
@@ -91,12 +86,10 @@ 

Source code for aepsych.models.gp_regression

if mean_module is None or covar_module is None:
             default_mean, default_covar = default_mean_covar_factory(
-                dim=self.dim, stimuli_per_trial=self.stimuli_per_trial
+                dim=self.dim,
+                stimuli_per_trial=self.stimuli_per_trial,
             )
 
-        # Tensors need to be directly registered, Modules themselves can be assigned as attr
-        self.register_buffer("lb", lb)
-        self.register_buffer("ub", ub)
         self.likelihood = likelihood
         self.mean_module = mean_module or default_mean
         self.covar_module = covar_module or default_covar
@@ -116,9 +109,9 @@ 

Source code for aepsych.models.gp_regression

        """
         classname = cls.__name__
 
-        lb = config.gettensor(classname, "lb")
-        ub = config.gettensor(classname, "ub")
         dim = config.getint(classname, "dim", fallback=None)
+        if dim is None:
+            dim = get_dims(config)
 
         mean_covar_factory = config.getobj(
             classname, "mean_covar_factory", fallback=default_mean_covar_factory
@@ -141,8 +134,6 @@ 

Source code for aepsych.models.gp_regression

optimizer_options = get_optimizer_options(config, classname)
 
         return {
-            "lb": lb,
-            "ub": ub,
             "dim": dim,
             "mean_module": mean,
             "covar_module": covar,
diff --git a/api/_modules/aepsych/models/inducing_point_allocators.html b/api/_modules/aepsych/models/inducing_point_allocators.html
deleted file mode 100644
index c56bf9b1c..000000000
--- a/api/_modules/aepsych/models/inducing_point_allocators.html
+++ /dev/null
@@ -1,671 +0,0 @@
-AEPsych · Adaptive experimentation for human perception and perceptually-informed outcomes
\ No newline at end of file
diff --git a/api/_modules/aepsych/models/inducing_point_allocators/index.html b/api/_modules/aepsych/models/inducing_point_allocators/index.html
deleted file mode 100644
index c56bf9b1c..000000000
--- a/api/_modules/aepsych/models/inducing_point_allocators/index.html
+++ /dev/null
@@ -1,671 +0,0 @@
-AEPsych · Adaptive experimentation for human perception and perceptually-informed outcomes
\ No newline at end of file
diff --git a/api/_modules/aepsych/models/monotonic_projection_gp.html b/api/_modules/aepsych/models/monotonic_projection_gp.html
index 739195649..f8ee6dda0 100644
--- a/api/_modules/aepsych/models/monotonic_projection_gp.html
+++ b/api/_modules/aepsych/models/monotonic_projection_gp.html
@@ -33,9 +33,9 @@ 

Source code for aepsych.models.monotonic_projection_gp

from aepsych.config import Config from aepsych.factory.default import default_mean_covar_factory from aepsych.models.gp_classification import GPClassificationModel -from aepsych.models.inducing_point_allocators import AutoAllocator -from aepsych.utils import get_optimizer_options -from botorch.models.utils.inducing_point_allocators import InducingPointAllocator +from aepsych.models.inducing_points import GreedyVarianceReduction +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import get_dims, get_optimizer_options from botorch.posteriors.gpytorch import GPyTorchPosterior from gpytorch.likelihoods import Likelihood from statsmodels.stats.moment_helpers import corr2cov, cov2corr @@ -115,36 +115,36 @@

Source code for aepsych.models.monotonic_projection_gp

self, lb: torch.Tensor, ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, + dim: int, monotonic_dims: List[int], monotonic_grid_size: int = 20, min_f_val: Optional[float] = None, - dim: Optional[int] = None, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: - """Initialize the MonotonicProjectionGP model. + """Initialize the MonotonicProjectionGP model. Unlike other models, this model needs bounds. Args: lb (torch.Tensor): Lower bounds of the parameters. ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method for allocating inducing points. + dim (int): The number of dimensions in the parameter space. monotonic_dims (List[int]): A list of the dimensions on which monotonicity should be enforced. monotonic_grid_size (int): The size of the grid, s, in 1. above. Defaults to 20. min_f_val (float, optional): If provided, maintains this minimum in the projection in 5. Defaults to None. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. Defaults to None. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. Defaults to None. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. Defaults to None. likelihood (Likelihood, optional): The likelihood function to use. If None defaults to Gaussian likelihood. Defaults to None. - inducing_size (int, optional): The number of inducing points to use. Defaults to None. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): The number of inducing points to use. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None. """ @@ -152,10 +152,10 @@

Source code for aepsych.models.monotonic_projection_gp

self.monotonic_dims = [int(d) for d in monotonic_dims] self.mon_grid_size = monotonic_grid_size self.min_f_val = min_f_val - self.inducing_point_method = inducing_point_method + self.lb = lb + self.ub = ub + super().__init__( - lb=lb, - ub=ub, dim=dim, mean_module=mean_module, covar_module=covar_module, @@ -251,12 +251,15 @@
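[Editor's note] A minimal construction sketch for the signature above: unlike the other refactored models, this one still takes bounds (they drive the monotonicity projection grid) alongside the now-required dim.

import torch
from aepsych.models.monotonic_projection_gp import MonotonicProjectionGP

model = MonotonicProjectionGP(
    lb=torch.tensor([0.0, 0.0]),
    ub=torch.tensor([1.0, 1.0]),
    dim=2,
    monotonic_dims=[0],  # enforce monotonicity along the first input dimension
)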

Source code for aepsych.models.monotonic_projection_gp

""" classname = cls.__name__ - inducing_size = config.getint(classname, "inducing_size", fallback=None) + inducing_size = config.getint(classname, "inducing_size", fallback=100) lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") dim = config.getint(classname, "dim", fallback=None) + if dim is None: + dim = get_dims(config) + mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=default_mean_covar_factory ) @@ -265,7 +268,7 @@

Source code for aepsych.models.monotonic_projection_gp

max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) inducing_point_method_class = config.getobj( - classname, "inducing_point_method", fallback=AutoAllocator + classname, "inducing_point_method", fallback=GreedyVarianceReduction ) # Check if allocator class has a `from_config` method if hasattr(inducing_point_method_class, "from_config"): diff --git a/api/_modules/aepsych/models/monotonic_projection_gp/index.html b/api/_modules/aepsych/models/monotonic_projection_gp/index.html index 739195649..f8ee6dda0 100644 --- a/api/_modules/aepsych/models/monotonic_projection_gp/index.html +++ b/api/_modules/aepsych/models/monotonic_projection_gp/index.html @@ -33,9 +33,9 @@

Source code for aepsych.models.monotonic_projection_gp

from aepsych.config import Config from aepsych.factory.default import default_mean_covar_factory from aepsych.models.gp_classification import GPClassificationModel -from aepsych.models.inducing_point_allocators import AutoAllocator -from aepsych.utils import get_optimizer_options -from botorch.models.utils.inducing_point_allocators import InducingPointAllocator +from aepsych.models.inducing_points import GreedyVarianceReduction +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import get_dims, get_optimizer_options from botorch.posteriors.gpytorch import GPyTorchPosterior from gpytorch.likelihoods import Likelihood from statsmodels.stats.moment_helpers import corr2cov, cov2corr @@ -115,36 +115,36 @@

Source code for aepsych.models.monotonic_projection_gp

self, lb: torch.Tensor, ub: torch.Tensor, - inducing_point_method: InducingPointAllocator, + dim: int, monotonic_dims: List[int], monotonic_grid_size: int = 20, min_f_val: Optional[float] = None, - dim: Optional[int] = None, mean_module: Optional[gpytorch.means.Mean] = None, covar_module: Optional[gpytorch.kernels.Kernel] = None, likelihood: Optional[Likelihood] = None, - inducing_size: Optional[int] = None, + inducing_point_method: Optional[InducingPointAllocator] = None, + inducing_size: int = 100, max_fit_time: Optional[float] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: - """Initialize the MonotonicProjectionGP model. + """Initialize the MonotonicProjectionGP model. Unlike other models, this model needs bounds. Args: lb (torch.Tensor): Lower bounds of the parameters. ub (torch.Tensor): Upper bounds of the parameters. - inducing_point_method (InducingPointAllocator): The method for allocating inducing points. + dim (int): The number of dimensions in the parameter space. monotonic_dims (List[int]): A list of the dimensions on which monotonicity should be enforced. monotonic_grid_size (int): The size of the grid, s, in 1. above. Defaults to 20. min_f_val (float, optional): If provided, maintains this minimum in the projection in 5. Defaults to None. - dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size - of lb and ub. Defaults to None. mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior. Defaults to None. covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a gamma prior. Defaults to None. likelihood (Likelihood, optional): The likelihood function to use. If None defaults to Gaussian likelihood. Defaults to None. - inducing_size (int, optional): The number of inducing points to use. Defaults to None. + inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points. + If not set, a GreedyVarianceReduction is created. + inducing_size (int): The number of inducing points to use. Defaults to 100. max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None. """ @@ -152,10 +152,10 @@

Source code for aepsych.models.monotonic_projection_gp

self.monotonic_dims = [int(d) for d in monotonic_dims] self.mon_grid_size = monotonic_grid_size self.min_f_val = min_f_val - self.inducing_point_method = inducing_point_method + self.lb = lb + self.ub = ub + super().__init__( - lb=lb, - ub=ub, dim=dim, mean_module=mean_module, covar_module=covar_module, @@ -251,12 +251,15 @@

Source code for aepsych.models.monotonic_projection_gp

""" classname = cls.__name__ - inducing_size = config.getint(classname, "inducing_size", fallback=None) + inducing_size = config.getint(classname, "inducing_size", fallback=100) lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") dim = config.getint(classname, "dim", fallback=None) + if dim is None: + dim = get_dims(config) + mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=default_mean_covar_factory ) @@ -265,7 +268,7 @@

Source code for aepsych.models.monotonic_projection_gp

max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) inducing_point_method_class = config.getobj( - classname, "inducing_point_method", fallback=AutoAllocator + classname, "inducing_point_method", fallback=GreedyVarianceReduction ) # Check if allocator class has a `from_config` method if hasattr(inducing_point_method_class, "from_config"): diff --git a/api/_modules/aepsych/models/monotonic_rejection_gp.html b/api/_modules/aepsych/models/monotonic_rejection_gp.html index 19be91b17..1e12bcfd3 100644 --- a/api/_modules/aepsych/models/monotonic_rejection_gp.html +++ b/api/_modules/aepsych/models/monotonic_rejection_gp.html @@ -29,7 +29,6 @@

Source code for aepsych.models.monotonic_rejection_gp

from typing import Any, Dict, List, Optional, Sequence, Tuple import gpytorch -import numpy as np import torch from aepsych.acquisition.rejection_sampler import RejectionSampler from aepsych.config import Config @@ -37,14 +36,10 @@

Source code for aepsych.models.monotonic_rejection_gp

from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad from aepsych.models.base import AEPsychMixin -from aepsych.models.inducing_point_allocators import AutoAllocator, SobolAllocator -from aepsych.models.utils import select_inducing_points -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.models.inducing_points import GreedyVarianceReduction, SobolAllocator +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import _process_bounds, get_dims, get_optimizer_options, promote_0d from botorch.fit import fit_gpytorch_mll -from botorch.models.utils.inducing_point_allocators import ( - GreedyVarianceReduction, - InducingPointAllocator, -) from gpytorch.kernels import Kernel from gpytorch.likelihoods import BernoulliLikelihood, Likelihood from gpytorch.means import Mean @@ -52,7 +47,6 @@

Source code for aepsych.models.monotonic_rejection_gp

from gpytorch.models import ApproximateGP from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy from scipy.stats import norm -from torch import Tensor
[docs]class MonotonicRejectionGP(AEPsychMixin, ApproximateGP): @@ -85,7 +79,7 @@

Source code for aepsych.models.monotonic_rejection_gp

num_induc: int = 25, num_samples: int = 250, num_rejection_samples: int = 5000, - inducing_point_method: InducingPointAllocator = AutoAllocator(), + inducing_point_method: Optional[InducingPointAllocator] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: """Initialize MonotonicRejectionGP. @@ -106,7 +100,8 @@

Source code for aepsych.models.monotonic_rejection_gp

num_samples (int): Number of samples for estimating posterior on predict or acquisition function evaluation. Defaults to 250. num_rejection_samples (int): Number of samples used for rejection sampling. Defaults to 5000. - inducing_point_method (InducingPointAllocator): Method for selecting inducing points. Defaults to AutoAllocator(). + inducing_point_method (InducingPointAllocator, optional): Method for selecting inducing points. If not set, + a GreedyVarianceReduction is created. optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B. """ @@ -115,11 +110,12 @@

Source code for aepsych.models.monotonic_rejection_gp

likelihood = BernoulliLikelihood() self.inducing_size = num_induc - self.inducing_point_method = inducing_point_method + self.inducing_point_method = inducing_point_method or GreedyVarianceReduction( + dim=self.dim + ) - inducing_points = select_inducing_points( - allocator=SobolAllocator(bounds=torch.stack((self.lb, self.ub))), - inducing_size=self.inducing_size, + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size ) inducing_points_aug = self._augment_with_deriv_index(inducing_points, 0) @@ -185,12 +181,10 @@

Source code for aepsych.models.monotonic_rejection_gp

""" self.set_train_data(train_x, train_y) - self.inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + self.inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=self.covar_module, - X=self.train_inputs[0], - bounds=self.bounds, + inputs=self._augment_with_deriv_index(self.train_inputs[0], 0), ) self._set_model(train_x, train_y)
@@ -374,7 +368,7 @@

Source code for aepsych.models.monotonic_rejection_gp

lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") - dim = config.getint(classname, "dim", fallback=None) + dim = get_dims(config) mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory diff --git a/api/_modules/aepsych/models/monotonic_rejection_gp/index.html b/api/_modules/aepsych/models/monotonic_rejection_gp/index.html index 19be91b17..1e12bcfd3 100644 --- a/api/_modules/aepsych/models/monotonic_rejection_gp/index.html +++ b/api/_modules/aepsych/models/monotonic_rejection_gp/index.html @@ -29,7 +29,6 @@

Source code for aepsych.models.monotonic_rejection_gp

from typing import Any, Dict, List, Optional, Sequence, Tuple import gpytorch -import numpy as np import torch from aepsych.acquisition.rejection_sampler import RejectionSampler from aepsych.config import Config @@ -37,14 +36,10 @@

Source code for aepsych.models.monotonic_rejection_gp

from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad from aepsych.models.base import AEPsychMixin -from aepsych.models.inducing_point_allocators import AutoAllocator, SobolAllocator -from aepsych.models.utils import select_inducing_points -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.models.inducing_points import GreedyVarianceReduction, SobolAllocator +from aepsych.models.inducing_points.base import InducingPointAllocator +from aepsych.utils import _process_bounds, get_dims, get_optimizer_options, promote_0d from botorch.fit import fit_gpytorch_mll -from botorch.models.utils.inducing_point_allocators import ( - GreedyVarianceReduction, - InducingPointAllocator, -) from gpytorch.kernels import Kernel from gpytorch.likelihoods import BernoulliLikelihood, Likelihood from gpytorch.means import Mean @@ -52,7 +47,6 @@

Source code for aepsych.models.monotonic_rejection_gp

from gpytorch.models import ApproximateGP from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy from scipy.stats import norm -from torch import Tensor
[docs]class MonotonicRejectionGP(AEPsychMixin, ApproximateGP): @@ -85,7 +79,7 @@

Source code for aepsych.models.monotonic_rejection_gp

num_induc: int = 25, num_samples: int = 250, num_rejection_samples: int = 5000, - inducing_point_method: InducingPointAllocator = AutoAllocator(), + inducing_point_method: Optional[InducingPointAllocator] = None, optimizer_options: Optional[Dict[str, Any]] = None, ) -> None: """Initialize MonotonicRejectionGP. @@ -106,7 +100,8 @@

Source code for aepsych.models.monotonic_rejection_gp

num_samples (int): Number of samples for estimating posterior on predict or acquisition function evaluation. Defaults to 250. num_rejection_samples (int): Number of samples used for rejection sampling. Defaults to 5000. - inducing_point_method (InducingPointAllocator): Method for selecting inducing points. Defaults to AutoAllocator(). + inducing_point_method (InducingPointAllocator, optional): Method for selecting inducing points. If not set, + a GreedyVarianceReduction is created. optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B. """ @@ -115,11 +110,12 @@

Source code for aepsych.models.monotonic_rejection_gp

likelihood = BernoulliLikelihood() self.inducing_size = num_induc - self.inducing_point_method = inducing_point_method + self.inducing_point_method = inducing_point_method or GreedyVarianceReduction( + dim=self.dim + ) - inducing_points = select_inducing_points( - allocator=SobolAllocator(bounds=torch.stack((self.lb, self.ub))), - inducing_size=self.inducing_size, + inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size ) inducing_points_aug = self._augment_with_deriv_index(inducing_points, 0) @@ -185,12 +181,10 @@

Source code for aepsych.models.monotonic_rejection_gp

""" self.set_train_data(train_x, train_y) - self.inducing_points = select_inducing_points( - allocator=self.inducing_point_method, - inducing_size=self.inducing_size, + self.inducing_points = self.inducing_point_method.allocate_inducing_points( + num_inducing=self.inducing_size, covar_module=self.covar_module, - X=self.train_inputs[0], - bounds=self.bounds, + inputs=self._augment_with_deriv_index(self.train_inputs[0], 0), ) self._set_model(train_x, train_y)
@@ -374,7 +368,7 @@

Source code for aepsych.models.monotonic_rejection_gp

lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") - dim = config.getint(classname, "dim", fallback=None) + dim = get_dims(config) mean_covar_factory = config.getobj( classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory diff --git a/api/_modules/aepsych/models/pairwise_probit.html b/api/_modules/aepsych/models/pairwise_probit.html index 5a6806e39..9a3936b5c 100644 --- a/api/_modules/aepsych/models/pairwise_probit.html +++ b/api/_modules/aepsych/models/pairwise_probit.html @@ -30,7 +30,7 @@

Source code for aepsych.models.pairwise_probit

from aepsych.config import Config from aepsych.factory import default_mean_covar_factory from aepsych.models.base import AEPsychMixin -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.utils import _process_bounds, get_dims, get_optimizer_options, promote_0d from aepsych.utils_logging import getLogger from botorch.fit import fit_gpytorch_mll from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood @@ -310,7 +310,7 @@

Source code for aepsych.models.pairwise_probit

lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") - dim = lb.shape[0] + dim = get_dims(config) max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) diff --git a/api/_modules/aepsych/models/pairwise_probit/index.html b/api/_modules/aepsych/models/pairwise_probit/index.html index 5a6806e39..9a3936b5c 100644 --- a/api/_modules/aepsych/models/pairwise_probit/index.html +++ b/api/_modules/aepsych/models/pairwise_probit/index.html @@ -30,7 +30,7 @@

Source code for aepsych.models.pairwise_probit

from aepsych.config import Config from aepsych.factory import default_mean_covar_factory from aepsych.models.base import AEPsychMixin -from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d +from aepsych.utils import _process_bounds, get_dims, get_optimizer_options, promote_0d from aepsych.utils_logging import getLogger from botorch.fit import fit_gpytorch_mll from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood @@ -310,7 +310,7 @@

Source code for aepsych.models.pairwise_probit

lb = config.gettensor(classname, "lb") ub = config.gettensor(classname, "ub") - dim = lb.shape[0] + dim = get_dims(config) max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None) diff --git a/api/_modules/aepsych/models/semi_p.html b/api/_modules/aepsych/models/semi_p.html index 4f80b16e1..2f7d5bd39 100644 --- a/api/_modules/aepsych/models/semi_p.html +++ b/api/_modules/aepsych/models/semi_p.html @@ -25,6 +25,7 @@

Source code for aepsych.models.semi_p

 
 from __future__ import annotations
 
+import warnings
 from copy import deepcopy
 from typing import Any, Dict, Optional, Tuple
 
@@ -36,11 +37,11 @@ 

Source code for aepsych.models.semi_p

 from aepsych.config import Config
 from aepsych.likelihoods import BernoulliObjectiveLikelihood, LinearBernoulliLikelihood
 from aepsych.models import GPClassificationModel
-from aepsych.models.inducing_point_allocators import AutoAllocator
-from aepsych.utils import _process_bounds, get_optimizer_options, promote_0d
+from aepsych.models.inducing_points import GreedyVarianceReduction
+from aepsych.models.inducing_points.base import InducingPointAllocator
+from aepsych.utils import get_dims, get_optimizer_options, promote_0d
 from aepsych.utils_logging import getLogger
 from botorch.acquisition.objective import PosteriorTransform
-from botorch.models.utils.inducing_point_allocators import InducingPointAllocator
 from botorch.optim.fit import fit_gpytorch_mll_scipy
 from botorch.posteriors import GPyTorchPosterior
 from gpytorch.distributions import MultivariateNormal
@@ -269,27 +270,21 @@ 

Source code for aepsych.models.semi_p

 
     def __init__(
         self,
-        lb: torch.Tensor,
-        ub: torch.Tensor,
-        inducing_point_method: InducingPointAllocator,
-        dim: Optional[int] = None,
+        dim: int,
         stim_dim: int = 0,
         mean_module: Optional[gpytorch.means.Mean] = None,
         covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Any] = None,
         slope_mean: float = 2,
-        inducing_size: Optional[int] = None,
+        inducing_point_method: Optional[InducingPointAllocator] = None,
+        inducing_size: int = 100,
         max_fit_time: Optional[float] = None,
         optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize SemiParametricGP.
         Args:
-            lb (torch.Tensor): Lower bounds of the parameters.
-            ub (torch.Tensor): Upper bounds of the parameters.
-            inducing_point_method (InducingPointAllocator): The method to use to select the inducing points.
-            dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
-                of lb and ub. Defaults to None.
+            dim (int): The number of dimensions in the parameter space.
             stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
             mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
             covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
@@ -297,16 +292,18 @@ 


             likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
                linear-Bernoulli likelihood with probit link.
             slope_mean (float): The mean of the slope. Defaults to 2.
-            inducing_size (int, optional): Number of inducing points. Defaults to 99.
+            inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points.
+                If not set, a GreedyVarianceReduction allocator is used.
+            inducing_size (int): Number of inducing points. Defaults to 100.
             max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
                 there is no limit to the fitting time.
             optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
                 fitting. Assumes we are using L-BFGS-B.
         """
 
-        lb, ub, dim = _process_bounds(lb, ub, dim)
+        self.dim = dim
         self.stim_dim = stim_dim
-        self.context_dims = list(range(dim))
+        self.context_dims = list(range(self.dim))
         self.context_dims.pop(stim_dim)
 
         if mean_module is None:
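For orientation, a minimal sketch of how the updated constructor is now called; the class name comes from the registry at the end of this patch, and the 2-parameter setup with intensity on dimension 0 is an illustrative assumption, not something this diff prescribes:

    from aepsych.models.semi_p import SemiParametricGPModel

    # dim is now required; bounds are no longer constructor arguments, and the
    # allocator defaults to GreedyVarianceReduction when not provided.
    model = SemiParametricGPModel(dim=2, stim_dim=0, inducing_size=100)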
@@ -319,7 +316,7 @@ 


         if covar_module is None:
             covar_module = ScaleKernel(
                 RBFKernel(
-                    ard_num_dims=dim - 1,
+                    ard_num_dims=self.dim - 1,
                     lengthscale_prior=GammaPrior(3, 6),
                     active_dims=self.context_dims,  # Operate only on x_s
                     batch_shape=torch.Size([2]),
@@ -331,11 +328,8 @@ 


         assert isinstance(
             likelihood, LinearBernoulliLikelihood
         ), "SemiP model only supports linear Bernoulli likelihoods!"
-        self.inducing_point_method = inducing_point_method
 
         super().__init__(
-            lb=lb,
-            ub=ub,
             dim=dim,
             mean_module=mean_module,
             covar_module=covar_module,
@@ -361,16 +355,17 @@ 


         """
 
         classname = cls.__name__
-        inducing_size = config.getint(classname, "inducing_size", fallback=None)
+        inducing_size = config.getint(classname, "inducing_size", fallback=100)
 
-        lb = config.gettensor(classname, "lb")
-        ub = config.gettensor(classname, "ub")
         dim = config.getint(classname, "dim", fallback=None)
 
+        if dim is None:
+            dim = get_dims(config)
+
         max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
         inducing_point_method_class = config.getobj(
-            classname, "inducing_point_method", fallback=AutoAllocator
+            classname, "inducing_point_method", fallback=GreedyVarianceReduction
         )
         # Check if allocator class has a `from_config` method
         if hasattr(inducing_point_method_class, "from_config"):
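A hedged sketch of driving this lookup from a config; the section and option names mirror the getobj call above, but the config string is illustrative and omits the [common] and parameter sections a real experiment needs:

    from aepsych.config import Config

    config = Config(
        config_str=(
            "[SemiParametricGPModel]\n"
            "inducing_point_method = GreedyVarianceReduction\n"
            "inducing_size = 100\n"
        )
    )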
@@ -393,8 +388,6 @@ 


         optimizer_options = get_optimizer_options(config, classname)
 
         return cls(
-            lb=lb,
-            ub=ub,
             stim_dim=stim_dim,
             dim=dim,
             likelihood=likelihood,
@@ -537,10 +530,7 @@ 


 
     def __init__(
         self,
-        lb: torch.Tensor,
-        ub: torch.Tensor,
-        inducing_point_method: InducingPointAllocator,
-        dim: Optional[int] = None,
+        dim: int,
         stim_dim: int = 0,
         slope_mean_module: Optional[gpytorch.means.Mean] = None,
         slope_covar_module: Optional[gpytorch.kernels.Kernel] = None,
@@ -548,18 +538,15 @@ 


         offset_covar_module: Optional[gpytorch.kernels.Kernel] = None,
         likelihood: Optional[Likelihood] = None,
         slope_mean: float = 2,
-        inducing_size: Optional[int] = None,
+        inducing_point_method: Optional[InducingPointAllocator] = None,
+        inducing_size: int = 100,
         max_fit_time: Optional[float] = None,
         optimizer_options: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initialize HadamardSemiPModel.
         Args:
-            lb (torch.Tensor): Lower bounds of the parameters.
-            ub (torch.Tensor): Upper bounds of the parameters.
-            inducing_point_method (InducingPointAllocator): The method to use to select the inducing points.
-            dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
-                of lb and ub.
+            dim (int): The number of dimensions in the parameter space.
             stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
             slope_mean_module (gpytorch.means.Mean, optional): Mean module to use (default: constant mean) for slope.
             slope_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for slope.
@@ -567,16 +554,15 @@ 


             offset_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for offset.
             likelihood (gpytorch.likelihood.Likelihood, optional): Defaults to a Bernoulli likelihood with logistic input and a floor of .5.
             slope_mean (float): The mean of the slope. Defaults to 2.
-            inducing_size (int, optional): Number of inducing points. Defaults to 99.
+            inducing_point_method (InducingPointAllocator, optional): The method to use for selecting inducing points.
+                If not set, a GreedyVarianceReduction allocator is used.
+            inducing_size (int): Number of inducing points. Defaults to 100.
             max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
                 there is no limit to the fitting time.
             optimizer_options (Dict[str, Any], optional): Optimizer options to pass to the SciPy optimizer during
                 fitting. Assumes we are using L-BFGS-B.
         """
-        self.inducing_point_method = inducing_point_method
         super().__init__(
-            lb=lb,
-            ub=ub,
             dim=dim,
             inducing_size=inducing_size,
             max_fit_time=max_fit_time,
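As with the semiparametric model above, a minimal sketch of the updated HadamardSemiPModel constructor; the 3-parameter space with intensity on dimension 0 is an illustrative assumption:

    from aepsych.models.semi_p import HadamardSemiPModel

    # dim counts all parameters; stim_dim indexes the monotonic intensity
    # dimension within them.
    model = HadamardSemiPModel(dim=3, stim_dim=0, inducing_size=100)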
@@ -674,11 +660,11 @@ 


         """
 
         classname = cls.__name__
-        inducing_size = config.getint(classname, "inducing_size", fallback=None)
+        inducing_size = config.getint(classname, "inducing_size", fallback=100)
 
-        lb = config.gettensor(classname, "lb")
-        ub = config.gettensor(classname, "ub")
         dim = config.getint(classname, "dim", fallback=None)
+        if dim is None:
+            dim = get_dims(config)
 
         slope_mean_module = config.getobj(classname, "slope_mean_module", fallback=None)
         slope_covar_module = config.getobj(
@@ -694,7 +680,7 @@ 


         max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
 
         inducing_point_method_class = config.getobj(
-            classname, "inducing_point_method", fallback=AutoAllocator
+            classname, "inducing_point_method", fallback=GreedyVarianceReduction
         )
         # Check if allocator class has a `from_config` method
         if hasattr(inducing_point_method_class, "from_config"):
@@ -714,8 +700,6 @@ 


         optimizer_options = get_optimizer_options(config, classname)
 
         return cls(
-            lb=lb,
-            ub=ub,
             stim_dim=stim_dim,
             dim=dim,
             slope_mean_module=slope_mean_module,
diff --git a/api/_modules/aepsych/models/semi_p/index.html b/api/_modules/aepsych/models/semi_p/index.html
index 4f80b16e1..2f7d5bd39 100644
--- a/api/_modules/aepsych/models/semi_p/index.html
+++ b/api/_modules/aepsych/models/semi_p/index.html
(hunks identical to api/_modules/aepsych/models/semi_p.html above)
diff --git a/api/_modules/aepsych/plotting.html b/api/_modules/aepsych/plotting.html
index 012a40cbd..f516e4ff7 100644
--- a/api/_modules/aepsych/plotting.html
+++ b/api/_modules/aepsych/plotting.html
@@ -31,7 +31,6 @@ 

Source code for aepsych.plotting

 from aepsych.strategy import Strategy
 from aepsych.utils import get_lse_contour, get_lse_interval, make_scaled_sobol
 from matplotlib.axes import Axes
-
 from matplotlib.image import AxesImage
 from scipy.stats import norm
 
diff --git a/api/_modules/aepsych/plotting/index.html b/api/_modules/aepsych/plotting/index.html
index 012a40cbd..f516e4ff7 100644
--- a/api/_modules/aepsych/plotting/index.html
+++ b/api/_modules/aepsych/plotting/index.html
(hunk identical to api/_modules/aepsych/plotting.html above)
diff --git a/api/_modules/aepsych/strategy.html b/api/_modules/aepsych/strategy.html
index 0b3f9b519..f556539b9 100644
--- a/api/_modules/aepsych/strategy.html
+++ b/api/_modules/aepsych/strategy.html
@@ -27,19 +27,7 @@ 

Source code for aepsych.strategy

 
 import time
 import warnings
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    List,
-    Literal,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    Type,
-    Union,
-)
+from typing import Any, Callable, List, Mapping, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -52,6 +40,7 @@ 


 from aepsych.config import Config
 from aepsych.generators.base import AEPsychGenerator
 from aepsych.models.base import AEPsychMixin
+from aepsych.models.utils import get_extremum, get_jnd, get_max, get_min, inv_query
 from aepsych.transforms import (
     ParameterTransformedGenerator,
     ParameterTransformedModel,
@@ -285,6 +274,7 @@ 


             )
 
         self.name = name
+        self.bounds = torch.stack([self.lb, self.ub])
 
 
[docs]    def normalize_inputs(
        self, x: torch.Tensor, y: torch.Tensor
@@ -356,7 +346,7 @@


         probability_space: bool = False,
         max_time: Optional[float] = None,
     ) -> Tuple[float, torch.Tensor]:
-        """Get the maximum value of the acquisition function.
+        """Return the maximum of the modeled function, subject to constraints
 
         Args:
            constraints (Mapping[int, float], optional): Which parameters to fix at specific points. Defaults to None.
@@ -364,16 +354,22 @@ 


             max_time (float, optional): Maximum time to run the optimization. Defaults to None.
 
         Returns:
-            Tuple[float, torch.Tensor]: The maximum value of the acquisition function and the corresponding input.
+            Tuple[float, torch.Tensor]: Tuple containing the max and its location (argmax).
         """
-        constraints = constraints or {}
         assert (
             self.model is not None
         ), "model is None! Cannot get the max without a model!"
         self.model.to(self.model_device)
-        return self.model.get_max(
-            constraints, probability_space=probability_space, max_time=max_time
-        )
+
+        val, arg = get_max(
+            self.model,
+            self.bounds,
+            locked_dims=constraints,
+            probability_space=probability_space,
+            max_time=max_time,
+        )
+
+        return val, arg
[docs]    @ensure_model_is_fresh
    def get_min(
@@ -382,24 +378,27 @@


         probability_space: bool = False,
         max_time: Optional[float] = None,
     ) -> Tuple[float, torch.Tensor]:
-        """Get the minimum value of the acquisition function.
+        """Return the minimum of the modeled function, subject to constraints
 
         Args:
             constraints (Mapping[int, float], optional): Which parameters to fix at specific points. Defaults to None.
             probability_space (bool): Whether to return the min in probability space. Defaults to False.
             max_time (float, optional): Maximum time to run the optimization. Defaults to None.
-
-        Returns:
-            Tuple[float, torch.Tensor]: The minimum value of the acquisition function and the corresponding input.
         """
-        constraints = constraints or {}
         assert (
             self.model is not None
         ), "model is None! Cannot get the min without a model!"
         self.model.to(self.model_device)
-        return self.model.get_min(
-            constraints, probability_space=probability_space, max_time=max_time
-        )
+
+        val, arg = get_min(
+            self.model,
+            self.bounds,
+            locked_dims=constraints,
+            probability_space=probability_space,
+            max_time=max_time,
+        )
+
+        return val, arg
[docs]    @ensure_model_is_fresh
    def inv_query(
@@ -420,14 +419,21 @@


         Returns:
             Tuple[float, torch.Tensor]: The input that corresponds to the given output value and the corresponding output.
         """
-        constraints = constraints or {}
         assert (
             self.model is not None
         ), "model is None! Cannot get the inv_query without a model!"
         self.model.to(self.model_device)
-        return self.model.inv_query(
-            y, constraints, probability_space, max_time=max_time
-        )
+
+        val, arg = inv_query(
+            model=self.model,
+            y=y,
+            bounds=self.bounds,
+            locked_dims=constraints,
+            probability_space=probability_space,
+            max_time=max_time,
+        )
+
+        return val, arg
[docs]    @ensure_model_is_fresh
    def predict(self, x: torch.Tensor, probability_space: bool = False) -> torch.Tensor:
@@ -457,7 +463,9 @@


             self.model is not None
         ), "model is None! Cannot get the get jnd without a model!"
         self.model.to(self.model_device)
-        return self.model.get_jnd(*args, **kwargs)
+        return get_jnd(  # type: ignore
+            model=self.model, lb=self.lb, ub=self.ub, dim=self.dim, *args, **kwargs
+        )
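Taken together, the strategy now stacks its bounds once and routes all queries through the aepsych.models.utils helpers. A minimal sketch of the same call pattern outside a strategy, assuming a fitted model is available:

    import torch
    from aepsych.models.utils import get_max

    def query_max(model, lb: torch.Tensor, ub: torch.Tensor):
        # Mirrors Strategy.get_max: bounds are stacked as [lb, ub], shape (2, d).
        bounds = torch.stack([lb, ub])
        return get_max(model, bounds, probability_space=False)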
[docs]    @ensure_model_is_fresh
    def sample(
diff --git a/api/_modules/aepsych/strategy/index.html b/api/_modules/aepsych/strategy/index.html
index 0b3f9b519..f556539b9 100644
--- a/api/_modules/aepsych/strategy/index.html
+++ b/api/_modules/aepsych/strategy/index.html
(hunks identical to api/_modules/aepsych/strategy.html above)

diff --git a/api/_modules/aepsych/utils.html b/api/_modules/aepsych/utils.html
index 4e9164758..3f59914ee 100644
--- a/api/_modules/aepsych/utils.html
+++ b/api/_modules/aepsych/utils.html
@@ -423,6 +423,32 @@

Source code for aepsych.utils

     # Filter all the nones out, which could just come back as an empty dict
     options = {key: value for key, value in options.items() if value is not None}
     return options
+
+
+[docs]def get_dims(config: Config) -> int:
+    """Return the number of dimensions in the parameter space. This accounts for any
+    transforms that may modify the parameter space for the model (e.g., fixed
+    parameters will not be included).
+
+    Args:
+        config (Config): The config to look in for the number of dimensions.
+
+    Returns:
+        int: The number of dimensions in the search space.
+    """
+    parnames = config.getlist("common", "parnames", element_type=str)
+
+    # This is pretty weak, but fixed is currently the only thing that will change the
+    # search space dims; when categorical transforms go in, this function needs to be
+    # smarter.
+    try:
+        valid_pars = [
+            parname for parname in parnames if config[parname]["par_type"] != "fixed"
+        ]
+        return len(valid_pars)
+    except KeyError:
+        # Likely the old style of parameter definition; fall back to looking at a bound
+        return len(config.getlist("common", "lb", element_type=float))
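A hedged sketch of the two code paths above; the config strings are illustrative and omit options a real experiment would carry:

    from aepsych.config import Config
    from aepsych.utils import get_dims

    # New-style parameters: the fixed parameter drops out of the search space.
    new_style = Config(
        config_str=(
            "[common]\nparnames = [signal, noise]\n"
            "[signal]\npar_type = continuous\n"
            "[noise]\npar_type = fixed\n"
        )
    )
    assert get_dims(new_style) == 1

    # Old-style definition: falls back to counting the lower-bound entries.
    old_style = Config(config_str="[common]\nparnames = [signal, noise]\nlb = [0, 0]\n")
    assert get_dims(old_style) == 2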
diff --git a/api/_modules/aepsych/utils/index.html b/api/_modules/aepsych/utils/index.html
index 4e9164758..3f59914ee 100644
--- a/api/_modules/aepsych/utils/index.html
+++ b/api/_modules/aepsych/utils/index.html
(hunk identical to api/_modules/aepsych/utils.html above)
diff --git a/api/_modules/index.html b/api/_modules/index.html
index 8a385fccd..45ab80018 100644
--- a/api/_modules/index.html
+++ b/api/_modules/index.html
@@ -61,7 +61,6 @@

All modules for which code is available

  • aepsych.models.derivative_gp
  • aepsych.models.gp_classification
  • aepsych.models.gp_regression
-  • aepsych.models.inducing_point_allocators
  • aepsych.models.monotonic_projection_gp
  • aepsych.models.monotonic_rejection_gp
  • aepsych.models.multitask_regression
diff --git a/api/acquisition.html b/api/acquisition.html
index 987792b30..1553a035e 100644
--- a/api/acquisition.html
+++ b/api/acquisition.html
@@ -288,14 +288,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.

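A minimal sketch of the new signature; GlobalMI is one of the lookahead classes named in the registry at the end of this patch, and the 1D bounds and target value are illustrative:

    import torch
    from aepsych.acquisition.lookahead import GlobalMI

    def make_acqf(model):
        # model: a fitted GPyTorchModel; lb/ub are used to generate the query set Xq.
        return GlobalMI(
            lb=torch.tensor([0.0]),
            ub=torch.tensor([1.0]),
            model=model,
            lookahead_type="levelset",
            target=0.75,
            query_set_size=256,
        )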
    @@ -327,14 +329,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.
       • args (Any) –
       • kwargs (Any) –

    @@ -352,14 +356,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.
       • args (Any) –
       • kwargs (Any) –

    @@ -376,21 +382,23 @@

+  • If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set.
+  • If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.

    Parameters:
+      • target (float, optional) – Threshold value to target in p-space.
+      • query_set_size (int, optional) – Number of points in the query set.
+      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • lb (Tensor) –
+      • ub (Tensor) –
       • model (botorch.models.gpytorch.GPyTorchModel) –
-      • target (Optional[float]) –
-      • query_set_size (Optional[int]) –
-      • Xq (Optional[Tensor]) –
+      • lookahead_type (Literal["levelset", "posterior"]) –

    @@ -403,14 +411,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.
       • args (Any) –
       • kwargs (Any) –

    @@ -433,14 +443,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.
       • args (Any) –
       • kwargs (Any) –

    @@ -460,15 +472,24 @@

+  • If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set.
+  • If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
+
+    target (float, optional): Threshold value to target in p-space. Default is None. query_set_size (int, optional): Number of points in the query set. Default is 256. Xq (torch.Tensor, optional): (m x d) global reference set. Default is None. k (float, optional): Scaling factor for the softmax approximation, controlling the “softness” of the maximum operation. Default is 20.0.

    Parameters:
+      • target (float, optional) – Threshold value to target in p-space.
+      • query_set_size (int, optional) – Number of points in the query set.
+      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • lb (Tensor) –
+      • ub (Tensor) –
       • model (botorch.models.gpytorch.GPyTorchModel) –
-      • target (Optional[float]) –
-      • query_set_size (Optional[int]) –
-      • Xq (Optional[Tensor]) –
+      • lookahead_type (Literal["levelset", "posterior"]) –

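The k parameter above scales a softmax (log-sum-exp) approximation of the hard maximum. A small illustrative implementation of that idea, not the library's code:

    import torch

    def soft_max(values: torch.Tensor, k: float = 20.0) -> torch.Tensor:
        # logsumexp(k * v) / k approaches max(v) as k grows; smaller k is "softer".
        return torch.logsumexp(k * values, dim=-1) / k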
    @@ -2078,14 +2107,16 @@

    Parameters:
-      • model (GPyTorchModel) – The gpytorch model to use.
+      • lb (Tensor) – Lower bounds of the input space, used to generate the query set (Xq).
+      • ub (Tensor) – Upper bounds of the input space, used to generate the query set (Xq).
+      • model (GPyTorchModel) – The gpytorch model.
       • lookahead_type (Literal["levelset", "posterior"]) – The type of look-ahead to perform (default is “levelset”). If the lookahead_type is “levelset”, the acqf will consider the posterior probability that a point is above or below the target level set. If the lookahead_type is “posterior”, the acqf will consider the posterior probability that a point will be detected or not.
       • target (float, optional) – Threshold value to target in p-space.
-      • posterior_transform (PosteriorTransform, optional) – Optional transformation to apply to the posterior.
-      • query_set_size (int, optional) – Number of points in the query set.
-      • Xq (torch.Tensor, optional) – (m x d) global reference set.
+      • posterior_transform (PosteriorTransform, optional) – Posterior transform to use. Defaults to None.
+      • query_set_size (int, optional) – Size of the query set. Defaults to 256.
+      • Xq (Tensor, optional) – (m x d) global reference set. Defaults to None.
       • args (Any) –
       • kwargs (Any) –

diff --git a/api/acquisition/index.html b/api/acquisition/index.html
index 987792b30..1553a035e 100644
--- a/api/acquisition/index.html
+++ b/api/acquisition/index.html
(hunks identical to api/acquisition.html above)
diff --git a/api/config.html b/api/config.html
index d9e433288..15f4d011c 100644
--- a/api/config.html
+++ b/api/config.html
@@ -40,7 +40,7 @@

    aepsych.config
    -registered_names: ClassVar[Dict[str, object]] = {'AEPsychObjective': <class 'aepsych.acquisition.objective.objective.AEPsychObjective'>, 'AcqfThompsonSamplerGenerator': <class 'aepsych.generators.acqf_thompson_sampler_generator.AcqfThompsonSamplerGenerator'>, 'AdditiveKernel': <class 'gpytorch.kernels.kernel.AdditiveKernel'>, 'AdditiveStructureKernel': <class 'gpytorch.kernels.additive_structure_kernel.AdditiveStructureKernel'>, 'ApproxGlobalSUR': <class 'aepsych.acquisition.lookahead.ApproxGlobalSUR'>, 'ArcKernel': <class 'gpytorch.kernels.arc_kernel.ArcKernel'>, 'AutoAllocator': <class 'aepsych.models.inducing_point_allocators.AutoAllocator'>, 'BernoulliLikelihood': <class 'gpytorch.likelihoods.bernoulli_likelihood.BernoulliLikelihood'>, 'BernoulliMCMutualInformation': <class 'aepsych.acquisition.mutual_information.BernoulliMCMutualInformation'>, 'BernoulliObjectiveLikelihood': <class 'aepsych.likelihoods.bernoulli.BernoulliObjectiveLikelihood'>, 'BetaLikelihood': <class 'gpytorch.likelihoods.beta_likelihood.BetaLikelihood'>, 'ConstantKernel': <class 'gpytorch.kernels.constant_kernel.ConstantKernel'>, 'CosineKernel': <class 'gpytorch.kernels.cosine_kernel.CosineKernel'>, 'CylindricalKernel': <class 'gpytorch.kernels.cylindrical_kernel.CylindricalKernel'>, 'DirichletClassificationLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.DirichletClassificationLikelihood'>, 'DistributionalInputKernel': <class 'gpytorch.kernels.distributional_input_kernel.DistributionalInputKernel'>, 'DummyAllocator': <class 'aepsych.models.inducing_point_allocators.DummyAllocator'>, 'EAVC': <class 'aepsych.acquisition.lookahead.EAVC'>, 'EpsilonGreedyGenerator': <class 'aepsych.generators.epsilon_greedy_generator.EpsilonGreedyGenerator'>, 'FixedAllocator': <class 'aepsych.models.inducing_point_allocators.FixedAllocator'>, 'FixedNoiseGaussianLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.FixedNoiseGaussianLikelihood'>, 'FloorGumbelObjective': <class 'aepsych.acquisition.objective.objective.FloorGumbelObjective'>, 'FloorLogitObjective': <class 'aepsych.acquisition.objective.objective.FloorLogitObjective'>, 'FloorProbitObjective': <class 'aepsych.acquisition.objective.objective.FloorProbitObjective'>, 'GPBetaRegressionModel': <class 'aepsych.models.gp_classification.GPBetaRegressionModel'>, 'GPClassificationModel': <class 'aepsych.models.gp_classification.GPClassificationModel'>, 'GPRegressionModel': <class 'aepsych.models.gp_regression.GPRegressionModel'>, 'GaussianLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.GaussianLikelihood'>, 'GaussianLikelihoodWithMissingObs': <class 'gpytorch.likelihoods.gaussian_likelihood.GaussianLikelihoodWithMissingObs'>, 'GaussianSymmetrizedKLKernel': <class 'gpytorch.kernels.gaussian_symmetrized_kl_kernel.GaussianSymmetrizedKLKernel'>, 'GlobalMI': <class 'aepsych.acquisition.lookahead.GlobalMI'>, 'GlobalSUR': <class 'aepsych.acquisition.lookahead.GlobalSUR'>, 'GreedyVarianceReduction': <class 'aepsych.models.inducing_point_allocators.GreedyVarianceReduction'>, 'GridInterpolationKernel': <class 'gpytorch.kernels.grid_interpolation_kernel.GridInterpolationKernel'>, 'GridKernel': <class 'gpytorch.kernels.grid_kernel.GridKernel'>, 'HadamardSemiPModel': <class 'aepsych.models.semi_p.HadamardSemiPModel'>, 'HammingIMQKernel': <class 'gpytorch.kernels.hamming_kernel.HammingIMQKernel'>, 'HeteroskedasticNoise': <class 'gpytorch.likelihoods.noise_models.HeteroskedasticNoise'>, 'IndependentMultitaskGPRModel': <class 
'aepsych.models.multitask_regression.IndependentMultitaskGPRModel'>, 'IndexKernel': <class 'gpytorch.kernels.index_kernel.IndexKernel'>, 'InducingPointKernel': <class 'gpytorch.kernels.inducing_point_kernel.InducingPointKernel'>, 'IntensityAwareSemiPGenerator': <class 'aepsych.generators.semi_p.IntensityAwareSemiPGenerator'>, 'KMeansAllocator': <class 'aepsych.models.inducing_point_allocators.KMeansAllocator'>, 'Kernel': <class 'gpytorch.kernels.kernel.Kernel'>, 'LCMKernel': <class 'gpytorch.kernels.lcm_kernel.LCMKernel'>, 'LaplaceLikelihood': <class 'gpytorch.likelihoods.laplace_likelihood.LaplaceLikelihood'>, 'Likelihood': <class 'gpytorch.likelihoods.likelihood.Likelihood'>, 'LikelihoodList': <class 'gpytorch.likelihoods.likelihood_list.LikelihoodList'>, 'LinearBernoulliLikelihood': <class 'aepsych.likelihoods.semi_p.LinearBernoulliLikelihood'>, 'LinearKernel': <class 'gpytorch.kernels.linear_kernel.LinearKernel'>, 'LocalMI': <class 'aepsych.acquisition.lookahead.LocalMI'>, 'LocalSUR': <class 'aepsych.acquisition.lookahead.LocalSUR'>, 'MCLevelSetEstimation': <class 'aepsych.acquisition.lse.MCLevelSetEstimation'>, 'MCPosteriorVariance': <class 'aepsych.acquisition.mc_posterior_variance.MCPosteriorVariance'>, 'ManualGenerator': <class 'aepsych.generators.manual_generator.ManualGenerator'>, 'Matern52KernelGrad': <class 'gpytorch.kernels.matern52_kernel_grad.Matern52KernelGrad'>, 'MaternKernel': <class 'gpytorch.kernels.matern_kernel.MaternKernel'>, 'MonotonicBernoulliMCMutualInformation': <class 'aepsych.acquisition.mutual_information.MonotonicBernoulliMCMutualInformation'>, 'MonotonicMCLSE': <class 'aepsych.acquisition.monotonic_rejection.MonotonicMCLSE'>, 'MonotonicMCPosteriorVariance': <class 'aepsych.acquisition.mc_posterior_variance.MonotonicMCPosteriorVariance'>, 'MonotonicProjectionGP': <class 'aepsych.models.monotonic_projection_gp.MonotonicProjectionGP'>, 'MonotonicRejectionGP': <class 'aepsych.models.monotonic_rejection_gp.MonotonicRejectionGP'>, 'MonotonicRejectionGenerator': <class 'aepsych.generators.monotonic_rejection_generator.MonotonicRejectionGenerator'>, 'MonotonicThompsonSamplerGenerator': <class 'aepsych.generators.monotonic_thompson_sampler_generator.MonotonicThompsonSamplerGenerator'>, 'MultiDeviceKernel': <class 'gpytorch.kernels.multi_device_kernel.MultiDeviceKernel'>, 'MultitaskGPRModel': <class 'aepsych.models.multitask_regression.MultitaskGPRModel'>, 'MultitaskGaussianLikelihood': <class 'gpytorch.likelihoods.multitask_gaussian_likelihood.MultitaskGaussianLikelihood'>, 'MultitaskKernel': <class 'gpytorch.kernels.multitask_kernel.MultitaskKernel'>, 'NewtonGirardAdditiveKernel': <class 'gpytorch.kernels.newton_girard_additive_kernel.NewtonGirardAdditiveKernel'>, 'None': None, 'OptimizeAcqfGenerator': <class 'aepsych.generators.optimize_acqf_generator.OptimizeAcqfGenerator'>, 'OrdinalGPModel': <class 'aepsych.models.ordinal_gp.OrdinalGPModel'>, 'OrdinalLikelihood': <class 'aepsych.likelihoods.ordinal.OrdinalLikelihood'>, 'PairwiseOptimizeAcqfGenerator': <class 'aepsych.generators.pairwise_optimize_acqf_generator.PairwiseOptimizeAcqfGenerator'>, 'PairwiseProbitModel': <class 'aepsych.models.pairwise_probit.PairwiseProbitModel'>, 'PairwiseSobolGenerator': <class 'aepsych.generators.pairwise_sobol_generator.PairwiseSobolGenerator'>, 'PeriodicKernel': <class 'gpytorch.kernels.periodic_kernel.PeriodicKernel'>, 'PiecewisePolynomialKernel': <class 'gpytorch.kernels.piecewise_polynomial_kernel.PiecewisePolynomialKernel'>, 'PolynomialKernel': <class 
'gpytorch.kernels.polynomial_kernel.PolynomialKernel'>, 'PolynomialKernelGrad': <class 'gpytorch.kernels.polynomial_kernel_grad.PolynomialKernelGrad'>, 'ProbitObjective': <class 'aepsych.acquisition.objective.objective.ProbitObjective'>, 'ProductKernel': <class 'gpytorch.kernels.kernel.ProductKernel'>, 'ProductStructureKernel': <class 'gpytorch.kernels.product_structure_kernel.ProductStructureKernel'>, 'RBFKernel': <class 'gpytorch.kernels.rbf_kernel.RBFKernel'>, 'RBFKernelGrad': <class 'gpytorch.kernels.rbf_kernel_grad.RBFKernelGrad'>, 'RBFKernelGradGrad': <class 'gpytorch.kernels.rbf_kernel_gradgrad.RBFKernelGradGrad'>, 'RFFKernel': <class 'gpytorch.kernels.rff_kernel.RFFKernel'>, 'RQKernel': <class 'gpytorch.kernels.rq_kernel.RQKernel'>, 'RandomGenerator': <class 'aepsych.generators.random_generator.RandomGenerator'>, 'SampleAroundPointsGenerator': <class 'aepsych.generators.manual_generator.SampleAroundPointsGenerator'>, 'ScaleKernel': <class 'gpytorch.kernels.scale_kernel.ScaleKernel'>, 'SemiPProbabilityObjective': <class 'aepsych.acquisition.objective.semi_p.SemiPProbabilityObjective'>, 'SemiPThresholdObjective': <class 'aepsych.acquisition.objective.semi_p.SemiPThresholdObjective'>, 'SemiParametricGPModel': <class 'aepsych.models.semi_p.SemiParametricGPModel'>, 'SequentialStrategy': <class 'aepsych.strategy.SequentialStrategy'>, 'SobolAllocator': <class 'aepsych.models.inducing_point_allocators.SobolAllocator'>, 'SobolGenerator': <class 'aepsych.generators.sobol_generator.SobolGenerator'>, 'SoftmaxLikelihood': <class 'gpytorch.likelihoods.softmax_likelihood.SoftmaxLikelihood'>, 'SpectralDeltaKernel': <class 'gpytorch.kernels.spectral_delta_kernel.SpectralDeltaKernel'>, 'SpectralMixtureKernel': <class 'gpytorch.kernels.spectral_mixture_kernel.SpectralMixtureKernel'>, 'Strategy': <class 'aepsych.strategy.Strategy'>, 'StudentTLikelihood': <class 'gpytorch.likelihoods.student_t_likelihood.StudentTLikelihood'>, '_GaussianLikelihoodBase': <class 'gpytorch.likelihoods.gaussian_likelihood._GaussianLikelihoodBase'>, '_MultitaskGaussianLikelihoodBase': <class 'gpytorch.likelihoods.multitask_gaussian_likelihood._MultitaskGaussianLikelihoodBase'>, '_OneDimensionalLikelihood': <class 'gpytorch.likelihoods.likelihood._OneDimensionalLikelihood'>, 'default_mean_covar_factory': <function default_mean_covar_factory>, 'monotonic_mean_covar_factory': <function monotonic_mean_covar_factory>, 'ordinal_mean_covar_factory': <function ordinal_mean_covar_factory>, 'pairwise_mean_covar_factory': <function pairwise_mean_covar_factory>, 'semi_p_posterior_transform': <function semi_p_posterior_transform>, 'song_mean_covar_factory': <function song_mean_covar_factory>}
    +registered_names: ClassVar[Dict[str, object]] = {'AEPsychObjective': <class 'aepsych.acquisition.objective.objective.AEPsychObjective'>, 'AcqfThompsonSamplerGenerator': <class 'aepsych.generators.acqf_thompson_sampler_generator.AcqfThompsonSamplerGenerator'>, 'AdditiveKernel': <class 'gpytorch.kernels.kernel.AdditiveKernel'>, 'AdditiveStructureKernel': <class 'gpytorch.kernels.additive_structure_kernel.AdditiveStructureKernel'>, 'ApproxGlobalSUR': <class 'aepsych.acquisition.lookahead.ApproxGlobalSUR'>, 'ArcKernel': <class 'gpytorch.kernels.arc_kernel.ArcKernel'>, 'BernoulliLikelihood': <class 'gpytorch.likelihoods.bernoulli_likelihood.BernoulliLikelihood'>, 'BernoulliMCMutualInformation': <class 'aepsych.acquisition.mutual_information.BernoulliMCMutualInformation'>, 'BernoulliObjectiveLikelihood': <class 'aepsych.likelihoods.bernoulli.BernoulliObjectiveLikelihood'>, 'BetaLikelihood': <class 'gpytorch.likelihoods.beta_likelihood.BetaLikelihood'>, 'ConstantKernel': <class 'gpytorch.kernels.constant_kernel.ConstantKernel'>, 'CosineKernel': <class 'gpytorch.kernels.cosine_kernel.CosineKernel'>, 'CylindricalKernel': <class 'gpytorch.kernels.cylindrical_kernel.CylindricalKernel'>, 'DirichletClassificationLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.DirichletClassificationLikelihood'>, 'DistributionalInputKernel': <class 'gpytorch.kernels.distributional_input_kernel.DistributionalInputKernel'>, 'EAVC': <class 'aepsych.acquisition.lookahead.EAVC'>, 'EpsilonGreedyGenerator': <class 'aepsych.generators.epsilon_greedy_generator.EpsilonGreedyGenerator'>, 'FixedAllocator': <class 'aepsych.models.inducing_points.fixed.FixedAllocator'>, 'FixedNoiseGaussianLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.FixedNoiseGaussianLikelihood'>, 'FloorGumbelObjective': <class 'aepsych.acquisition.objective.objective.FloorGumbelObjective'>, 'FloorLogitObjective': <class 'aepsych.acquisition.objective.objective.FloorLogitObjective'>, 'FloorProbitObjective': <class 'aepsych.acquisition.objective.objective.FloorProbitObjective'>, 'GPBetaRegressionModel': <class 'aepsych.models.gp_classification.GPBetaRegressionModel'>, 'GPClassificationModel': <class 'aepsych.models.gp_classification.GPClassificationModel'>, 'GPRegressionModel': <class 'aepsych.models.gp_regression.GPRegressionModel'>, 'GaussianLikelihood': <class 'gpytorch.likelihoods.gaussian_likelihood.GaussianLikelihood'>, 'GaussianLikelihoodWithMissingObs': <class 'gpytorch.likelihoods.gaussian_likelihood.GaussianLikelihoodWithMissingObs'>, 'GaussianSymmetrizedKLKernel': <class 'gpytorch.kernels.gaussian_symmetrized_kl_kernel.GaussianSymmetrizedKLKernel'>, 'GlobalMI': <class 'aepsych.acquisition.lookahead.GlobalMI'>, 'GlobalSUR': <class 'aepsych.acquisition.lookahead.GlobalSUR'>, 'GreedyVarianceReduction': <class 'aepsych.models.inducing_points.greedy_variance_reduction.GreedyVarianceReduction'>, 'GridInterpolationKernel': <class 'gpytorch.kernels.grid_interpolation_kernel.GridInterpolationKernel'>, 'GridKernel': <class 'gpytorch.kernels.grid_kernel.GridKernel'>, 'HadamardSemiPModel': <class 'aepsych.models.semi_p.HadamardSemiPModel'>, 'HammingIMQKernel': <class 'gpytorch.kernels.hamming_kernel.HammingIMQKernel'>, 'HeteroskedasticNoise': <class 'gpytorch.likelihoods.noise_models.HeteroskedasticNoise'>, 'IndependentMultitaskGPRModel': <class 'aepsych.models.multitask_regression.IndependentMultitaskGPRModel'>, 'IndexKernel': <class 'gpytorch.kernels.index_kernel.IndexKernel'>, 'InducingPointKernel': <class 
'gpytorch.kernels.inducing_point_kernel.InducingPointKernel'>, 'IntensityAwareSemiPGenerator': <class 'aepsych.generators.semi_p.IntensityAwareSemiPGenerator'>, 'KMeansAllocator': <class 'aepsych.models.inducing_points.kmeans.KMeansAllocator'>, 'Kernel': <class 'gpytorch.kernels.kernel.Kernel'>, 'LCMKernel': <class 'gpytorch.kernels.lcm_kernel.LCMKernel'>, 'LaplaceLikelihood': <class 'gpytorch.likelihoods.laplace_likelihood.LaplaceLikelihood'>, 'Likelihood': <class 'gpytorch.likelihoods.likelihood.Likelihood'>, 'LikelihoodList': <class 'gpytorch.likelihoods.likelihood_list.LikelihoodList'>, 'LinearBernoulliLikelihood': <class 'aepsych.likelihoods.semi_p.LinearBernoulliLikelihood'>, 'LinearKernel': <class 'gpytorch.kernels.linear_kernel.LinearKernel'>, 'LocalMI': <class 'aepsych.acquisition.lookahead.LocalMI'>, 'LocalSUR': <class 'aepsych.acquisition.lookahead.LocalSUR'>, 'MCLevelSetEstimation': <class 'aepsych.acquisition.lse.MCLevelSetEstimation'>, 'MCPosteriorVariance': <class 'aepsych.acquisition.mc_posterior_variance.MCPosteriorVariance'>, 'ManualGenerator': <class 'aepsych.generators.manual_generator.ManualGenerator'>, 'Matern52KernelGrad': <class 'gpytorch.kernels.matern52_kernel_grad.Matern52KernelGrad'>, 'MaternKernel': <class 'gpytorch.kernels.matern_kernel.MaternKernel'>, 'MonotonicBernoulliMCMutualInformation': <class 'aepsych.acquisition.mutual_information.MonotonicBernoulliMCMutualInformation'>, 'MonotonicMCLSE': <class 'aepsych.acquisition.monotonic_rejection.MonotonicMCLSE'>, 'MonotonicMCPosteriorVariance': <class 'aepsych.acquisition.mc_posterior_variance.MonotonicMCPosteriorVariance'>, 'MonotonicProjectionGP': <class 'aepsych.models.monotonic_projection_gp.MonotonicProjectionGP'>, 'MonotonicRejectionGP': <class 'aepsych.models.monotonic_rejection_gp.MonotonicRejectionGP'>, 'MonotonicRejectionGenerator': <class 'aepsych.generators.monotonic_rejection_generator.MonotonicRejectionGenerator'>, 'MonotonicThompsonSamplerGenerator': <class 'aepsych.generators.monotonic_thompson_sampler_generator.MonotonicThompsonSamplerGenerator'>, 'MultiDeviceKernel': <class 'gpytorch.kernels.multi_device_kernel.MultiDeviceKernel'>, 'MultitaskGPRModel': <class 'aepsych.models.multitask_regression.MultitaskGPRModel'>, 'MultitaskGaussianLikelihood': <class 'gpytorch.likelihoods.multitask_gaussian_likelihood.MultitaskGaussianLikelihood'>, 'MultitaskKernel': <class 'gpytorch.kernels.multitask_kernel.MultitaskKernel'>, 'NewtonGirardAdditiveKernel': <class 'gpytorch.kernels.newton_girard_additive_kernel.NewtonGirardAdditiveKernel'>, 'None': None, 'OptimizeAcqfGenerator': <class 'aepsych.generators.optimize_acqf_generator.OptimizeAcqfGenerator'>, 'OrdinalGPModel': <class 'aepsych.models.ordinal_gp.OrdinalGPModel'>, 'OrdinalLikelihood': <class 'aepsych.likelihoods.ordinal.OrdinalLikelihood'>, 'PairwiseOptimizeAcqfGenerator': <class 'aepsych.generators.pairwise_optimize_acqf_generator.PairwiseOptimizeAcqfGenerator'>, 'PairwiseProbitModel': <class 'aepsych.models.pairwise_probit.PairwiseProbitModel'>, 'PairwiseSobolGenerator': <class 'aepsych.generators.pairwise_sobol_generator.PairwiseSobolGenerator'>, 'PeriodicKernel': <class 'gpytorch.kernels.periodic_kernel.PeriodicKernel'>, 'PiecewisePolynomialKernel': <class 'gpytorch.kernels.piecewise_polynomial_kernel.PiecewisePolynomialKernel'>, 'PolynomialKernel': <class 'gpytorch.kernels.polynomial_kernel.PolynomialKernel'>, 'PolynomialKernelGrad': <class 'gpytorch.kernels.polynomial_kernel_grad.PolynomialKernelGrad'>, 'ProbitObjective': <class 
'aepsych.acquisition.objective.objective.ProbitObjective'>, 'ProductKernel': <class 'gpytorch.kernels.kernel.ProductKernel'>, 'ProductStructureKernel': <class 'gpytorch.kernels.product_structure_kernel.ProductStructureKernel'>, 'RBFKernel': <class 'gpytorch.kernels.rbf_kernel.RBFKernel'>, 'RBFKernelGrad': <class 'gpytorch.kernels.rbf_kernel_grad.RBFKernelGrad'>, 'RBFKernelGradGrad': <class 'gpytorch.kernels.rbf_kernel_gradgrad.RBFKernelGradGrad'>, 'RFFKernel': <class 'gpytorch.kernels.rff_kernel.RFFKernel'>, 'RQKernel': <class 'gpytorch.kernels.rq_kernel.RQKernel'>, 'RandomGenerator': <class 'aepsych.generators.random_generator.RandomGenerator'>, 'SampleAroundPointsGenerator': <class 'aepsych.generators.manual_generator.SampleAroundPointsGenerator'>, 'ScaleKernel': <class 'gpytorch.kernels.scale_kernel.ScaleKernel'>, 'SemiPProbabilityObjective': <class 'aepsych.acquisition.objective.semi_p.SemiPProbabilityObjective'>, 'SemiPThresholdObjective': <class 'aepsych.acquisition.objective.semi_p.SemiPThresholdObjective'>, 'SemiParametricGPModel': <class 'aepsych.models.semi_p.SemiParametricGPModel'>, 'SequentialStrategy': <class 'aepsych.strategy.SequentialStrategy'>, 'SobolAllocator': <class 'aepsych.models.inducing_points.sobol.SobolAllocator'>, 'SobolGenerator': <class 'aepsych.generators.sobol_generator.SobolGenerator'>, 'SoftmaxLikelihood': <class 'gpytorch.likelihoods.softmax_likelihood.SoftmaxLikelihood'>, 'SpectralDeltaKernel': <class 'gpytorch.kernels.spectral_delta_kernel.SpectralDeltaKernel'>, 'SpectralMixtureKernel': <class 'gpytorch.kernels.spectral_mixture_kernel.SpectralMixtureKernel'>, 'Strategy': <class 'aepsych.strategy.Strategy'>, 'StudentTLikelihood': <class 'gpytorch.likelihoods.student_t_likelihood.StudentTLikelihood'>, '_GaussianLikelihoodBase': <class 'gpytorch.likelihoods.gaussian_likelihood._GaussianLikelihoodBase'>, '_MultitaskGaussianLikelihoodBase': <class 'gpytorch.likelihoods.multitask_gaussian_likelihood._MultitaskGaussianLikelihoodBase'>, '_OneDimensionalLikelihood': <class 'gpytorch.likelihoods.likelihood._OneDimensionalLikelihood'>, 'default_mean_covar_factory': <function default_mean_covar_factory>, 'monotonic_mean_covar_factory': <function monotonic_mean_covar_factory>, 'ordinal_mean_covar_factory': <function ordinal_mean_covar_factory>, 'pairwise_mean_covar_factory': <function pairwise_mean_covar_factory>, 'semi_p_posterior_transform': <function semi_p_posterior_transform>, 'song_mean_covar_factory': <function song_mean_covar_factory>}
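For context, this mapping is how AEPsych's Config resolves the string names used in experiment .ini files into Python objects. A minimal sketch of looking one of the relocated allocators up by name (assuming registered_names is exposed on the Config class, as the ClassVar annotation above indicates):

    from aepsych.config import Config

    # After this change, the allocator names resolve to classes under the
    # new aepsych.models.inducing_points.* modules rather than the removed
    # aepsych.models.inducing_point_allocators module.
    allocator_cls = Config.registered_names["KMeansAllocator"]
    print(allocator_cls.__module__)  # aepsych.models.inducing_points.kmeans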
diff --git a/api/generators.html b/api/generators.html
index 3d0c59d6c..1d494d46a 100644
--- a/api/generators.html
+++ b/api/generators.html
@@ -99,12 +99,14 @@

    Submodules

    aepsych.generators.epsilon_greedy_generator module

    -class aepsych.generators.epsilon_greedy_generator.EpsilonGreedyGenerator(subgenerator, epsilon=0.1)[source]
    +class aepsych.generators.epsilon_greedy_generator.EpsilonGreedyGenerator(lb, ub, subgenerator, epsilon=0.1)[source]

    Bases: AEPsychGenerator

    Initialize EpsilonGreedyGenerator.

    Parameters:
+• lb (torch.Tensor) – Lower bounds for the optimization.

+• ub (torch.Tensor) – Upper bounds for the optimization.

    • subgenerator (AEPsychGenerator) – The generator to use when not exploiting.

    • epsilon (float) – The probability of exploration. Defaults to 0.1.

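A minimal sketch of the new construction order implied by this signature; the bounds and the Sobol subgenerator are illustrative choices, not taken from this diff:

    import torch
    from aepsych.generators import EpsilonGreedyGenerator, SobolGenerator

    lb = torch.tensor([0.0, 0.0])  # illustrative 2-d search bounds
    ub = torch.tensor([1.0, 1.0])

    # lb/ub now lead the signature; the subgenerator serves the
    # (1 - epsilon) exploit branch, exploration samples at random.
    gen = EpsilonGreedyGenerator(
        lb=lb,
        ub=ub,
        subgenerator=SobolGenerator(lb=lb, ub=ub),
        epsilon=0.1,
    )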
    @@ -342,13 +344,17 @@

    Submodules
    -class aepsych.generators.monotonic_rejection_generator.MonotonicRejectionGenerator(acqf, acqf_kwargs=None, model_gen_options=None, explore_features=None)[source]
    +class aepsych.generators.monotonic_rejection_generator.MonotonicRejectionGenerator(acqf, lb, ub, acqf_kwargs=None, model_gen_options=None, explore_features=None)[source]

    Bases: AEPsychGenerator[MonotonicRejectionGP]

    Generator specifically to be used with MonotonicRejectionGP, which generates new points to sample by minimizing an acquisition function through stochastic gradient descent.

-Initialize MonotonicRejectionGenerator. :param acqf: Acquisition function to use. :type acqf: MonotonicMCAcquisition :param acqf_kwargs: Extra arguments to
+Initialize MonotonicRejectionGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf_kwargs: Extra arguments to

    pass to acquisition function. Defaults to None.

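A sketch of the updated call, assuming the acquisition class is still passed uninstantiated as before; the bounds and the target level are illustrative:

    import torch
    from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
    from aepsych.generators import MonotonicRejectionGenerator

    # acqf remains the first argument; lb/ub now follow it.
    gen = MonotonicRejectionGenerator(
        acqf=MonotonicMCLSE,
        lb=torch.tensor([0.0]),
        ub=torch.tensor([1.0]),
        acqf_kwargs={"target": 0.75},  # illustrative level-set target
    )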
    @@ -361,6 +367,8 @@

    Submodules

    aepsych.generators.optimize_acqf_generator module

    -class aepsych.generators.optimize_acqf_generator.OptimizeAcqfGenerator(acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]
    +class aepsych.generators.optimize_acqf_generator.OptimizeAcqfGenerator(lb, ub, acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]

    Bases: AEPsychGenerator

    Generator that chooses points by minimizing an acquisition function.

-Initialize OptimizeAcqfGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to
+Initialize OptimizeAcqfGenerator. :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to

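A sketch of the new leading lb/ub arguments (bound values and the choice of acquisition function are illustrative):

    import torch
    from aepsych.acquisition.lse import MCLevelSetEstimation
    from aepsych.generators import OptimizeAcqfGenerator

    gen = OptimizeAcqfGenerator(
        lb=torch.tensor([0.0, 0.0]),
        ub=torch.tensor([1.0, 1.0]),
        acqf=MCLevelSetEstimation,
        restarts=10,   # optimizer restarts, as before
        samps=1000,    # quasi-random initialization samples
    )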
    @@ -495,6 +507,8 @@

    Submodules

    Module contents

    -class aepsych.generators.OptimizeAcqfGenerator(acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]
    +class aepsych.generators.OptimizeAcqfGenerator(lb, ub, acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]

    Bases: AEPsychGenerator

    Generator that chooses points by minimizing an acquisition function.

-Initialize OptimizeAcqfGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to
+Initialize OptimizeAcqfGenerator. :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to

    @@ -701,6 +719,8 @@

    Submodules
    -class aepsych.generators.MonotonicRejectionGenerator(acqf, acqf_kwargs=None, model_gen_options=None, explore_features=None)[source]
    +class aepsych.generators.MonotonicRejectionGenerator(acqf, lb, ub, acqf_kwargs=None, model_gen_options=None, explore_features=None)[source]

    Bases: AEPsychGenerator[MonotonicRejectionGP]

    Generator specifically to be used with MonotonicRejectionGP, which generates new points to sample by minimizing an acquisition function through stochastic gradient descent.

-Initialize MonotonicRejectionGenerator. :param acqf: Acquisition function to use. :type acqf: MonotonicMCAcquisition :param acqf_kwargs: Extra arguments to
+Initialize MonotonicRejectionGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf_kwargs: Extra arguments to

    pass to acquisition function. Defaults to None.

    @@ -780,6 +804,8 @@

    Submodules
    -class aepsych.generators.EpsilonGreedyGenerator(subgenerator, epsilon=0.1)[source]
    +class aepsych.generators.EpsilonGreedyGenerator(lb, ub, subgenerator, epsilon=0.1)[source]

    Bases: AEPsychGenerator

    Initialize EpsilonGreedyGenerator.

    Parameters:
+• lb (torch.Tensor) – Lower bounds for the optimization.

+• ub (torch.Tensor) – Upper bounds for the optimization.

    • subgenerator (AEPsychGenerator) – The generator to use when not exploiting.

    • epsilon (float) – The probability of exploration. Defaults to 0.1.

    @@ -1235,10 +1263,14 @@

    Submodules
    -class aepsych.generators.PairwiseOptimizeAcqfGenerator(acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]
    +class aepsych.generators.PairwiseOptimizeAcqfGenerator(lb, ub, acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]

    Bases: OptimizeAcqfGenerator

    Deprecated. Use OptimizeAcqfGenerator instead.

-Initialize OptimizeAcqfGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to
+Initialize OptimizeAcqfGenerator. :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to

    @@ -1252,6 +1284,8 @@

    Submodules
    -class aepsych.generators.IntensityAwareSemiPGenerator(acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]
    +class aepsych.generators.IntensityAwareSemiPGenerator(lb, ub, acqf, acqf_kwargs=None, restarts=10, samps=1000, max_gen_time=None, stimuli_per_trial=1)[source]

    Bases: OptimizeAcqfGenerator

Generator for SemiP. With botorch machinery, in order to optimize acquisition separately over context and intensity, we need two ingredients.

@@ -1351,6 +1385,10 @@

    Submodules
    -class aepsych.generators.AcqfThompsonSamplerGenerator(acqf, acqf_kwargs=None, samps=1000, stimuli_per_trial=1)[source]
    +class aepsych.generators.AcqfThompsonSamplerGenerator(lb, ub, acqf, acqf_kwargs=None, samps=1000, stimuli_per_trial=1)[source]

    Bases: AEPsychGenerator

    Generator that chooses points by minimizing an acquisition function.

-Initialize OptimizeAcqfGenerator. :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to
+Initialize OptimizeAcqfGenerator. :param lb: Lower bounds for the optimization. :type lb: torch.Tensor :param ub: Upper bounds for the optimization. :type ub: torch.Tensor :param acqf: Acquisition function to use. :type acqf: AcquisitionFunction :param acqf_kwargs: Extra arguments to

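The same bounds-first pattern applies here; a sketch with an illustrative acquisition function:

    import torch
    from aepsych.acquisition.mc_posterior_variance import MCPosteriorVariance
    from aepsych.generators import AcqfThompsonSamplerGenerator

    gen = AcqfThompsonSamplerGenerator(
        lb=torch.tensor([0.0]),
        ub=torch.tensor([1.0]),
        acqf=MCPosteriorVariance,
        samps=1000,
    )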
    @@ -1407,6 +1451,8 @@

    Submodules
    • samps (int) – Number of samples for quasi-random initialization of the acquisition function optimizer. Defaults to 1000.

    • stimuli_per_trial (int) – Number of stimuli per trial. Defaults to 1.

+• lb (torch.Tensor) –

+• ub (torch.Tensor) –

    • acqf (botorch.acquisition.AcquisitionFunction) –

    • acqf_kwargs (Dict[str, object], optional) –

diff --git a/api/genindex.html b/api/genindex.html
index 3c381bb35..a0bdc9694 100644
--- a/api/genindex.html
+++ b/api/genindex.html
@@ -27,7 +27,6 @@

Index

[Index-page residue from this hunk is not recoverable line-for-line. What the diff shows: the alphabetical index drops its "K" letter section and the entries tied to the removed inducing-point-allocator API — AutoAllocator, FixedAllocator, GreedyVarianceReduction, and SobolAllocator (classes in aepsych.models), allocate_inducing_points() (aepsych.models.AutoAllocator method) — as well as inv_query() (aepsych.models.base.AEPsychMixin method); surviving entries such as SMOCU (class in aepsych.acquisition.lookahead) and SobolGenerator (class in aepsych.generators) are merely reflowed.]
diff --git a/api/models.html b/api/models.html
index 1f9e7386a..58fb58bc7 100644
--- a/api/models.html
+++ b/api/models.html
@@ -209,152 +209,6 @@

Submodules

train_targets: Optional[Tensor]
        -
        -
        -property bounds: Tensor
        -
        -
        -
        -get_max(locked_dims=None, probability_space=False, n_samples=1000, max_time=None)[source]
        -

        Return the maximum of the modeled function, subject to constraints

        -
        -
        Parameters:
        -
          -
• locked_dims (Mapping[int, List[float]], optional) – Dimensions to fix, so that the max is along a slice of the full surface. Defaults to None.

        • -
• probability_space (bool) – Is y (and therefore the returned nearest_y) in probability space instead of latent function space? Defaults to False.

        • -
        • n_samples (int) – number of coarse grid points to sample for optimization estimate.

        • -
        • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

        • -
        • self (ModelProtocol) –

        • -
        -
        -
        Returns:
        -

        Tuple containing the max and its location (argmax).

        -
        -
        Return type:
        -

        Tuple[float, torch.Tensor]

        -
        -
        -
        -
        -
        -get_min(locked_dims=None, probability_space=False, n_samples=1000, max_time=None)[source]
        -

Return the minimum of the modeled function, subject to constraints :param locked_dims: Dimensions to fix, so that the

        -
        -

        min is along a slice of the full surface.

        -
        -
        -
        Parameters:
        -
          -
• probability_space (bool) – Is y (and therefore the returned nearest_y) in probability space instead of latent function space? Defaults to False.

        • -
        • n_samples (int) – number of coarse grid points to sample for optimization estimate.

        • -
        • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

        • -
        • self (ModelProtocol) –

        • -
        • locked_dims (Mapping[int, List[float]], optional) –

        • -
        -
        -
        Returns:
        -

        Tuple containing the min and its location (argmin).

        -
        -
        Return type:
        -

        Tuple[float, torch.Tensor]

        -
        -
        -
        -
        -
        -inv_query(y, locked_dims=None, probability_space=False, n_samples=1000, max_time=None, weights=None)[source]
        -

Query the model inverse. Return nearest x such that f(x) = queried y, and also return the

        -
        -

        value of f at that point.

        -
        -
        -
        Parameters:
        -
          -
        • y (float) – Points at which to find the inverse.

        • -
• locked_dims (Mapping[int, float], optional) – Dimensions to fix, so that the inverse is along a slice of the full surface.

        • -
• probability_space (bool) – Is y (and therefore the returned nearest_y) in probability space instead of latent function space? Defaults to False.

        • -
        • n_samples (int) – number of coarse grid points to sample for optimization estimate. Defaults to 1000.

        • -
        • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

        • -
        • weights (torch.Tensor, optional) – Weights for the optimization. Defaults to None.

        • -
        -
        -
        Returns:
        -

        -
        Tuple containing the value of f

        nearest to queried y and the x position of this value.

        -
        -
        -

        -
        -
        Return type:
        -

        Tuple[float, torch.Tensor]

        -
        -
        -
        -
        -
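For reference, a sketch of how this now-removed helper was typically invoked, assuming model is a previously fitted AEPsych model; the target value and locked dimension are illustrative:

    # Find the stimulus x whose predicted response probability is nearest
    # to 0.75, holding dimension 0 fixed at 0.5 (both values illustrative).
    fval, xstar = model.inv_query(
        y=0.75,
        locked_dims={0: 0.5},
        probability_space=True,
    )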
        -get_jnd(grid=None, cred_level=None, intensity_dim=- 1, confsamps=500, method='step')[source]
        -

        Calculate the JND.

        -

Note that JND can have multiple plausible definitions outside of the linear case, so we provide options for how to compute it. For method=”step”, we report how far one needs to go over in stimulus space to move 1 unit up in latent space (this is a lot of people’s conventional understanding of the JND). For method=”taylor”, we report the local derivative, which also maps to a 1st-order Taylor expansion of the latent function. This is a formal generalization of JND as defined in Weber’s law. Both definitions are equivalent for linear psychometric functions.

        -
        -
        Parameters:
        -
          -
• grid (torch.Tensor, optional) – Mesh grid over which to find the JND. Defaults to a square grid of size as determined by aepsych.utils.dim_grid.

        • -
• cred_level (float, optional) – Credible level for computing an interval. Defaults to None, computing no interval.

        • -
• intensity_dim (int) – Dimension over which to compute the JND. Defaults to -1.

        • -
        • confsamps (int) – Number of posterior samples to use for -computing the credible interval. Defaults to 500.

        • -
        • method (str) – “taylor” or “step” method (see docstring). -Defaults to “step”.

        • -
        • self (ModelProtocol) –

        • -
        -
        -
        Returns:
        -

        -
        either the

        mean JND, or a median, lower, upper tuple of the JND posterior.

        -
        -
        -

        -
        -
        Return type:
        -

        Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]

        -
        -
        -
        -
        -
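Again continuing the sketch with a fitted model, both definitions can be requested (values illustrative):

    # Mean JND under the default "step" definition:
    jnd = model.get_jnd()

    # Median plus a 95% credible interval under the "taylor" definition:
    median, lower, upper = model.get_jnd(cred_level=0.95, method="taylor")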
-dim_grid(gridsize=30, slice_dims=None)[source]

Generate a grid based on lower, upper, and dim.

Parameters:
• gridsize (int) – Number of points in each dimension. Defaults to 30.
• slice_dims (Mapping[int, float], optional) – Dimensions to fix at a certain value. Defaults to None.
• self (ModelProtocol) –

Return type:
Tensor
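For example, with the same fitted model as in the sketches above:

    # 30 points per free dimension, with dimension 1 pinned at 0.0.
    grid = model.dim_grid(gridsize=30, slice_dims={1: 0.0})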
        set_train_data(inputs=None, targets=None, strict=False)[source]
        @@ -560,22 +414,19 @@

Parameters:
- • lb (torch.Tensor) – Lower bounds of the parameters.
- • ub (torch.Tensor) – Upper bounds of the parameters.
- • inducing_point_method (InducingPointAllocator) – The method to use for selecting inducing points.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub.
+ • dim (int) – The number of dimensions in the parameter space.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None, defaults to Bernoulli likelihood.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 99.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.
• optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.
- • inducing_points (Optional[torch.Tensor]) –
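To make the signature change concrete, a construction sketch (assuming this parameter list documents aepsych.models.GPClassificationModel, which the list itself does not name; values illustrative):

    from aepsych.models import GPClassificationModel, KMeansAllocator

    # With inducing_point_method omitted, a GreedyVarianceReduction allocator is used.
    model = GPClassificationModel(dim=2, inducing_size=100)

    # The allocator can still be chosen explicitly via the now-optional argument.
    model = GPClassificationModel(dim=2, inducing_point_method=KMeansAllocator())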

        @@ -703,17 +554,15 @@

Parameters:
- • lb (torch.Tensor) – Lower bounds of the parameters.
- • ub (torch.Tensor) – Upper bounds of the parameters.
- • inducing_point_method (InducingPointAllocator) – The method to use to select the inducing points.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub. Defaults to None.
+ • dim (int) – The number of dimensions in the parameter space.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None, defaults to Beta likelihood.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 100.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.
• optimizer_options (Optional[Dict[str, Any]]) –

@@ -761,7 +610,8 @@

Parameters:
- • lb (torch.Tensor) – Lower bounds of the parameters.
- • ub (torch.Tensor) – Upper bounds of the parameters.
- • inducing_point_method (InducingPointAllocator) – The method to use for selecting inducing points.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub.
+ • dim (int) – The number of dimensions in the parameter space.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None, defaults to Bernoulli likelihood.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 99.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.
• optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.
- • inducing_points (Optional[torch.Tensor]) –

          @@ -1093,7 +940,8 @@

Parameters:
- • lb (torch.Tensor) – Lower bounds of the parameters.
- • ub (torch.Tensor) – Upper bounds of the parameters.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub.
+ • dim (int) – The number of dimensions in the parameter space.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

@@ -1499,36 +1344,36 @@

Parameters:
• lb (torch.Tensor) – Lower bounds of the parameters.
• ub (torch.Tensor) – Upper bounds of the parameters.
- • inducing_point_method (InducingPointAllocator) – The method for allocating inducing points.
+ • dim (int, optional) – The number of dimensions in the parameter space.
• monotonic_dims (List[int]) – A list of the dimensions on which monotonicity should be enforced.
• monotonic_grid_size (int) – The size of the grid, s, in 1. above. Defaults to 20.
• min_f_val (float, optional) – If provided, maintains this minimum in the projection in 5. Defaults to None.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub. Defaults to None.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (Likelihood, optional) – The likelihood function to use. If None, defaults to Gaussian likelihood.
- • inducing_size (int, optional) – The number of inducing points to use. Defaults to None.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – The number of inducing points to use. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.
• optimizer_options (Optional[Dict[str, Any]]) –
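A construction sketch (assuming this parameter list documents aepsych.models.MonotonicProjectionGP, which the list itself does not name; values illustrative):

    import torch
    from aepsych.models import MonotonicProjectionGP

    model = MonotonicProjectionGP(
        lb=torch.tensor([0.0, 0.0]),
        ub=torch.tensor([1.0, 1.0]),
        dim=2,
        monotonic_dims=[1],      # enforce monotonicity along dimension 1
        monotonic_grid_size=20,
    )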

@@ -1740,35 +1585,43 @@

Parameters:
- • stim_dim (int) – Index of the intensity (monotonic) dimension. Defaults to 0.
- • slope_mean_module (gpytorch.means.Mean, optional) – Mean module to use (default: constant mean) for slope.
- • slope_covar_module (gpytorch.kernels.Kernel, optional) – Covariance kernel to use (default: scaled RBF) for slope.
- • offset_mean_module (gpytorch.means.Mean, optional) – Mean module to use (default: constant mean) for offset.
- • offset_covar_module (gpytorch.kernels.Kernel, optional) – Covariance kernel to use (default: scaled RBF) for offset.
- • likelihood (gpytorch.likelihood.Likelihood, optional) – Defaults to Bernoulli with logistic input and a floor of 0.5.
- • slope_mean (float) – The mean of the slope. Defaults to 2.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 99.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.
• optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.
- • lb (torch.Tensor) –
- • ub (torch.Tensor) –
- • inducing_point_method (InducingPointAllocator) –
- • dim (int, optional) –
+ • dim (int) –
+ • stim_dim (int) –
+ • slope_mean_module (Optional[gpytorch.means.Mean]) –
+ • slope_covar_module (Optional[gpytorch.kernels.Kernel]) –
+ • offset_mean_module (Optional[gpytorch.means.Mean]) –
+ • offset_covar_module (Optional[gpytorch.kernels.Kernel]) –
+ • likelihood (Optional[Likelihood]) –
+ • slope_mean (float) –
+ • inducing_point_method (InducingPointAllocator, optional) –
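A construction sketch (assuming this parameter list documents aepsych.models.SemiParametricGPModel, which the list itself does not name; values illustrative):

    from aepsych.models import SemiParametricGPModel

    # Post-change constructor: dim replaces lb/ub; stim_dim marks the intensity dimension.
    model = SemiParametricGPModel(dim=2, stim_dim=0, slope_mean=2.0, inducing_size=100)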

              @@ -1852,35 +1705,33 @@

Parameters:
- • stim_dim (int) – Index of the intensity (monotonic) dimension. Defaults to 0.
- • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
- • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None, defaults to linear-Bernoulli likelihood with probit link.
• slope_mean (float) – The mean of the slope. Defaults to 2.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 99.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.
• optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.
- • lb (torch.Tensor) –
- • ub (torch.Tensor) –
- • inducing_point_method (InducingPointAllocator) –
- • dim (int, optional) –
+ • dim (int) –
+ • stim_dim (int) –
+ • mean_module (Optional[gpytorch.means.Mean]) –
+ • covar_module (gpytorch.kernels.Kernel, optional) –

              @@ -2016,17 +1867,15 @@

Parameters:
- • lb (torch.Tensor) – Lower bounds of the parameters.
- • ub (torch.Tensor) – Upper bounds of the parameters.
- • inducing_point_method (InducingPointAllocator) – The method to use to select the inducing points.
- • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size of lb and ub. Defaults to None.
+ • dim (int) – The number of dimensions in the parameter space.
• mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.
• covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.
• likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None, defaults to Beta likelihood.
- • inducing_size (int, optional) – Number of inducing points. Defaults to 100.
+ • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. If not set, a GreedyVarianceReduction allocator is used.
+ • inducing_size (int) – Number of inducing points. Defaults to 100.
• max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.
• optimizer_options (Optional[Dict[str, Any]]) –

@@ -2186,333 +2035,6 @@

-class aepsych.models.AutoAllocator(*args, **kwargs)[source]

Bases: BaseAllocator

An inducing point allocator that dynamically chooses an allocation strategy based on the number of unique data points available.

Initialize the AutoAllocator with a fallback allocator.

Parameters:
• fallback_allocator (InducingPointAllocator, optional) – Allocator to use if there are more unique points than required.
• bounds (Optional[Tensor]) –

-allocate_inducing_points(inputs, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Allocate inducing points by either using the unique input data directly or falling back to another allocation method if there are too many unique points.

Parameters:
• inputs (torch.Tensor) – A tensor of shape (n, d) containing the input data.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
A (num_inducing, d)-dimensional tensor of inducing points.

Return type:
torch.Tensor

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the AutoAllocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the AutoAllocator.

Return type:
Dict[str, Any]
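For illustration, a minimal sketch using the signatures documented above (the data shapes are made up):

    import torch
    from aepsych.models import AutoAllocator, KMeansAllocator

    allocator = AutoAllocator(fallback_allocator=KMeansAllocator())
    inputs = torch.rand(500, 2)  # (n, d) input data
    # More unique points than requested, so the fallback (k-means++) is used.
    points = allocator.allocate_inducing_points(inputs=inputs, num_inducing=100)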
-class aepsych.models.KMeansAllocator(*args, **kwargs)[source]

Bases: BaseAllocator

An inducing point allocator that uses k-means++ to allocate inducing points.

Initialize the KMeansAllocator.

Parameters:
bounds (Optional[Tensor]) –

-allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Generates num_inducing inducing points using k-means++ initialization on the input data.

Parameters:
• inputs (torch.Tensor) – A tensor of shape (n, d) containing the input data.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
A (num_inducing, d)-dimensional tensor of inducing points selected via k-means++.

Return type:
torch.Tensor

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the KMeansAllocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the KMeansAllocator.

Return type:
Dict[str, Any]
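A minimal sketch (shapes illustrative):

    import torch
    from aepsych.models import KMeansAllocator

    inputs = torch.rand(500, 3)  # (n, d) input data
    points = KMeansAllocator().allocate_inducing_points(inputs=inputs, num_inducing=100)
    assert points.shape == (100, 3)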
-class aepsych.models.SobolAllocator(*args, **kwargs)[source]

Bases: BaseAllocator

An inducing point allocator that uses Sobol sequences to allocate inducing points.

Initialize the SobolAllocator with bounds.

Parameters:
bounds (Tensor) –

-allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Generates num_inducing inducing points within the specified bounds using Sobol sampling.

Parameters:
• inputs (torch.Tensor) – Input tensor; not required for Sobol sampling.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
A (num_inducing, d)-dimensional tensor of inducing points within the specified bounds.

Return type:
torch.Tensor

Raises:
ValueError – If bounds is not provided.

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the SobolAllocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the SobolAllocator.

Return type:
Dict[str, Any]
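A minimal sketch; note that bounds are required here, while inputs are not:

    import torch
    from aepsych.models import SobolAllocator

    bounds = torch.tensor([[0.0, 0.0], [1.0, 10.0]])  # shape (2, d): lower row, upper row
    allocator = SobolAllocator(bounds=bounds)
    points = allocator.allocate_inducing_points(num_inducing=100)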
-class aepsych.models.DummyAllocator(*args, **kwargs)[source]

Bases: BaseAllocator

Initialize the DummyAllocator with bounds.

Parameters:
bounds (torch.Tensor) – Bounds for allocating points. Should be of shape (2, d).

-allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Allocate inducing points by returning zeros of the appropriate shape.

Parameters:
• inputs (torch.Tensor) – Input tensor; not required for DummyAllocator.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
A (num_inducing, d)-dimensional tensor of zeros.

Return type:
torch.Tensor

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the DummyAllocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the DummyAllocator.

Return type:
Dict[str, Any]
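A minimal sketch of the (trivial) behavior:

    import torch
    from aepsych.models import DummyAllocator

    allocator = DummyAllocator(bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]]))
    points = allocator.allocate_inducing_points(num_inducing=10)
    assert points.shape == (10, 2) and torch.all(points == 0)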
-class aepsych.models.FixedAllocator(*args, **kwargs)[source]

Bases: BaseAllocator

Initialize the FixedAllocator with inducing points and bounds.

Parameters:
• points (torch.Tensor) – Inducing points to use.
• bounds (torch.Tensor, optional) – Bounds for allocating points. Should be of shape (2, d).

-allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Allocate inducing points by returning the fixed inducing points.

Parameters:
• inputs (torch.Tensor) – Input tensor; not required for FixedAllocator.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
The fixed inducing points.

Return type:
torch.Tensor

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the FixedAllocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the FixedAllocator.

Return type:
Dict[str, Any]
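A minimal sketch:

    import torch
    from aepsych.models import FixedAllocator

    fixed = torch.rand(50, 2)  # pre-chosen inducing locations
    allocator = FixedAllocator(points=fixed)
    assert torch.equal(allocator.allocate_inducing_points(), fixed)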
-class aepsych.models.GreedyVarianceReduction(*args, **kwargs)[source]

Bases: GreedyVarianceReduction, ConfigurableMixin

Initialize the GreedyVarianceReduction with bounds.

Parameters:
bounds (torch.Tensor, optional) – Bounds for allocating points. Should be of shape (2, d).

-allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]

Allocate inducing points using the GreedyVarianceReduction strategy.

Parameters:
• inputs (torch.Tensor) – Input tensor; not required for GreedyVarianceReduction.
• covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.
• num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.
• input_batch_shape (torch.Size, optional) – Batch shape; defaults to an empty size. Included for API compatibility, but not used here.

Returns:
The allocated inducing points.

Return type:
torch.Tensor

-classmethod get_config_options(config, name=None, options=None)[source]

Get configuration options for the GreedyVarianceReduction allocator.

Parameters:
• config (Config) – Configuration object.
• name (str, optional) – Name of the allocator. Defaults to None.
• options (Dict[str, Any], optional) – Additional options. Defaults to None.

Returns:
Configuration options for the GreedyVarianceReduction allocator.

Return type:
Dict[str, Any]
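A minimal sketch, using only the documented signature (shapes illustrative); this is also the allocator the models above construct by default when inducing_point_method is not set:

    import torch
    from aepsych.models import GreedyVarianceReduction

    allocator = GreedyVarianceReduction()
    points = allocator.allocate_inducing_points(
        inputs=torch.rand(200, 2),  # observed inputs
        num_inducing=100,
    )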
                diff --git a/api/models/index.html b/api/models/index.html index 1f9e7386a..58fb58bc7 100644 --- a/api/models/index.html +++ b/api/models/index.html @@ -209,152 +209,6 @@

                Submodules train_targets: Optional[Tensor]
                -
                -
                -property bounds: Tensor
                -
                -
                -
                -get_max(locked_dims=None, probability_space=False, n_samples=1000, max_time=None)[source]
                -

                Return the maximum of the modeled function, subject to constraints

                -
                -
                Parameters:
                -
                  -
                • locked_dims (Mapping[int, List[float]], optional) – Dimensions to fix, so that the -max is along a slice of the full surface. Defaults to None.

                • -
                • probability_space (bool) – Is y (and therefore the returned nearest_y) in -probability space instead of latent function space? Defaults to False.

                • -
                • n_samples (int) – number of coarse grid points to sample for optimization estimate.

                • -
                • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

                • -
                • self (ModelProtocol) –

                • -
                -
                -
                Returns:
                -

                Tuple containing the max and its location (argmax).

                -
                -
                Return type:
                -

                Tuple[float, torch.Tensor]

                -
                -
                -
                -
                -
                -get_min(locked_dims=None, probability_space=False, n_samples=1000, max_time=None)[source]
                -

                Return the minimum of the modeled function, subject to constraints -:param locked_dims: Dimensions to fix, so that the

                -
                -

                min is along a slice of the full surface.

                -
                -
                -
                Parameters:
                -
                  -
                • probability_space (bool) – Is y (and therefore the returned nearest_y) in -probability space instead of latent function space? Defaults to False.

                • -
                • n_samples (int) – number of coarse grid points to sample for optimization estimate.

                • -
                • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

                • -
                • self (ModelProtocol) –

                • -
                • locked_dims (Mapping[int, List[float]], optional) –

                • -
                -
                -
                Returns:
                -

                Tuple containing the min and its location (argmin).

                -
                -
                Return type:
                -

                Tuple[float, torch.Tensor]

                -
                -
                -
                -
                -
                -inv_query(y, locked_dims=None, probability_space=False, n_samples=1000, max_time=None, weights=None)[source]
                -

                Query the model inverse. -Return nearest x such that f(x) = queried y, and also return the

                -
                -

                value of f at that point.

                -
                -
                -
                Parameters:
                -
                  -
                • y (float) – Points at which to find the inverse.

                • -
                • locked_dims (Mapping[int, float], optional) – Dimensions to fix, so that the -inverse is along a slice of the full surface.

                • -
                • probability_space (bool) – Is y (and therefore the returned nearest_y) in -probability space instead of latent function space? Defaults to False.

                • -
                • n_samples (int) – number of coarse grid points to sample for optimization estimate. Defaults to 1000.

                • -
                • max_time (float, optional) – Maximum time to spend optimizing. Defaults to None.

                • -
                • weights (torch.Tensor, optional) – Weights for the optimization. Defaults to None.

                • -
                -
                -
                Returns:
                -

                -
                Tuple containing the value of f

                nearest to queried y and the x position of this value.

                -
                -
                -

                -
                -
                Return type:
                -

                Tuple[float, torch.Tensor]

                -
                -
                -
                -
                -
                -get_jnd(grid=None, cred_level=None, intensity_dim=- 1, confsamps=500, method='step')[source]
                -

                Calculate the JND.

                -

                Note that JND can have multiple plausible definitions -outside of the linear case, so we provide options for how to compute it. -For method=”step”, we report how far one needs to go over in stimulus -space to move 1 unit up in latent space (this is a lot of people’s -conventional understanding of the JND). -For method=”taylor”, we report the local derivative, which also maps to a -1st-order Taylor expansion of the latent function. This is a formal -generalization of JND as defined in Weber’s law. -Both definitions are equivalent for linear psychometric functions.

                -
                -
                Parameters:
                -
                  -
                • grid (torch.Tensor, optional) – Mesh grid over which to find the JND. -Defaults to a square grid of size as determined by aepsych.utils.dim_grid.

                • -
                • cred_level (float, optional) – Credible level for computing an interval. -Defaults to None, computing no interval.

                • -
                • intensity_dim (int) – Dimension over which to compute the JND. -Defaults to -1.

                • -
                • confsamps (int) – Number of posterior samples to use for -computing the credible interval. Defaults to 500.

                • -
                • method (str) – “taylor” or “step” method (see docstring). -Defaults to “step”.

                • -
                • self (ModelProtocol) –

                • -
                -
                -
                Returns:
                -

                -
                either the

                mean JND, or a median, lower, upper tuple of the JND posterior.

                -
                -
                -

                -
                -
                Return type:
                -

                Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]

                -
                -
                -
                -
                -
                -dim_grid(gridsize=30, slice_dims=None)[source]
                -

                Generate a grid based on lower, upper, and dim.

                -
                -
                Parameters:
                -
                  -
                • gridsize (int) – Number of points in each dimension. Defaults to 30.

                • -
                • slice_dims (Mapping[int, float], optional) – Dimensions to fix at a certain value. Defaults to None.

                • -
                • self (ModelProtocol) –

                • -
                -
                -
                Return type:
                -

                Tensor

                -
                -
                -
                set_train_data(inputs=None, targets=None, strict=False)[source]
                @@ -560,22 +414,19 @@

                Submodules
                Parameters:
                  -
                • lb (torch.Tensor) – Lower bounds of the parameters.

                • -
                • ub (torch.Tensor) – Upper bounds of the parameters.

                • -
                • inducing_point_method (InducingPointAllocator) – The method to use for selecting inducing points.

                • -
                • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub.

                • +
                • dim (int) – The number of dimensions in the parameter space.

                • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.

                • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

                • likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None defaults to Bernouli likelihood.

                • -
                • inducing_size (int, optional) – Number of inducing points. Defaults to 99.

                • +
                • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                • +
                • inducing_size (int) – Number of inducing points. Defaults to 100.

                • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.

                • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.

                • -
                • inducing_points (Optional[torch.Tensor]) –

                @@ -703,17 +554,15 @@

                Submodules
                Parameters:
                  -
                • lb (torch.Tensor) – Lower bounds of the parameters.

                • -
                • ub (torch.Tensor) – Upper bounds of the parameters.

                • -
                • inducing_point_method (InducingPointAllocator) – The method to use to select the inducing points.

                • -
                • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub. Defaults to None.

                • +
                • dim (int) – The number of dimensions in the parameter space.

                • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior. Defaults to None.

                • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

                • likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None defaults to Beta likelihood.

                • -
                • inducing_size (int, optional) – Number of inducing points. Defaults to 100.

                • +
                • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                • +
                • inducing_size (int) – Number of inducing points. Defaults to 100.

                • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.

                • optimizer_options (Optional[Dict[str, Any]]) –

                • @@ -761,7 +610,8 @@

                  Submodules
                  Parameters:
                    -
                  • lb (torch.Tensor) – Lower bounds of the parameters.

                  • -
                  • ub (torch.Tensor) – Upper bounds of the parameters.

                  • -
                  • inducing_point_method (InducingPointAllocator) – The method to use for selecting inducing points.

                  • -
                  • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub.

                  • +
                  • dim (int) – The number of dimensions in the parameter space.

                  • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.

                  • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

                  • likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None defaults to Bernouli likelihood.

                  • -
                  • inducing_size (int, optional) – Number of inducing points. Defaults to 99.

                  • +
                  • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                  • +
                  • inducing_size (int) – Number of inducing points. Defaults to 100.

                  • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.

                  • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.

                  • -
                  • inducing_points (Optional[torch.Tensor]) –

                  @@ -1093,7 +940,8 @@

                  Submodules
                  Parameters:
                    -
                  • lb (torch.Tensor) – Lower bounds of the parameters.

                  • -
                  • ub (torch.Tensor) – Upper bounds of the parameters.

                  • -
                  • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub.

                  • +
                  • dim (int) – The number of dimensions in the parameter space.

                  • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.

                  • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

                  • @@ -1499,36 +1344,36 @@

                    Submodules
                    Parameters:
                    • lb (torch.Tensor) – Lower bounds of the parameters.

                    • ub (torch.Tensor) – Upper bounds of the parameters.

                    • -
                    • inducing_point_method (InducingPointAllocator) – The method for allocating inducing points.

                    • +
                    • dim (int, optional) – The number of dimensions in the parameter space.

                    • monotonic_dims (List[int]) – A list of the dimensions on which monotonicity should be enforced.

                    • monotonic_grid_size (int) – The size of the grid, s, in 1. above. Defaults to 20.

                    • min_f_val (float, optional) – If provided, maintains this minimum in the projection in 5. Defaults to None.

                    • -
                    • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub. Defaults to None.

                    • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior. Defaults to None.

                    • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior. Defaults to None.

                    • likelihood (Likelihood, optional) – The likelihood function to use. If None defaults to Gaussian likelihood. Defaults to None.

                    • -
                    • inducing_size (int, optional) – The number of inducing points to use. Defaults to None.

                    • +
                    • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                    • +
                    • inducing_size (int) – The number of inducing points to use. Defaults to 100.

                    • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.

                    • optimizer_options (Optional[Dict[str, Any]]) –

                    • @@ -1740,35 +1585,43 @@

                      Submodules
                      Parameters:
                        -
                      • stim_dim (int) – Index of the intensity (monotonic) dimension. Defaults to 0.

                      • -
                      • slope_mean_module (gpytorch.means.Mean, optional) – Mean module to use (default: constant mean) for slope.

                      • -
                      • slope_covar_module (gpytorch.kernels.Kernel, optional) – Covariance kernel to use (default: scaled RBF) for slope.

                      • -
                      • offset_mean_module (gpytorch.means.Mean, optional) – Mean module to use (default: constant mean) for offset.

                      • -
                      • offset_covar_module (gpytorch.kernels.Kernel, optional) – Covariance kernel to use (default: scaled RBF) for offset.

                      • -
                      • likelihood (gpytorch.likelihood.Likelihood, optional)) – defaults to bernoulli with logistic input and a floor of .5

                      • -
                      • slope_mean (float) – The mean of the slope. Defaults to 2.

                      • -
                      • inducing_size (int, optional) – Number of inducing points. Defaults to 99.

                      • +
                      • inducing_size (int) – Number of inducing points. Defaults to 100.

                      • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.

                      • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.

                      • -
                      • lb (torch.Tensor) –

                      • -
                      • ub (torch.Tensor) –

                      • -
                      • inducing_point_method (InducingPointAllocator) –

                      • -
                      • dim (int, optional) –

                      • +
                      • dim (int) –

                      • +
                      • stim_dim (int) –

                      • +
                      • slope_mean_module (Optional[gpytorch.means.Mean]) –

                      • +
                      • slope_covar_module (Optional[gpytorch.kernels.Kernel]) –

                      • +
                      • offset_mean_module (Optional[gpytorch.means.Mean]) –

                      • +
                      • offset_covar_module (Optional[gpytorch.kernels.Kernel]) –

                      • +
                      • likelihood (Optional[Likelihood]) –

                      • +
                      • slope_mean (float) –

                      • +
                      • inducing_point_method (InducingPointAllocator, optional) –

                      @@ -1852,35 +1705,33 @@

                      Submodules
                      Parameters:
                        -
                      • stim_dim (int) – Index of the intensity (monotonic) dimension. Defaults to 0.

                      • -
                      • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior.

                      • -
                      • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a -gamma prior.

                      • likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None defaults to linear-Bernouli likelihood with probit link.

                      • slope_mean (float) – The mean of the slope. Defaults to 2.

                      • -
                      • inducing_size (int, optional) – Number of inducing points. Defaults to 99.

                      • +
                      • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                      • +
                      • inducing_size (int) – Number of inducing points. Defaults to 100.

                      • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time.

                      • optimizer_options (Dict[str, Any], optional) – Optimizer options to pass to the SciPy optimizer during fitting. Assumes we are using L-BFGS-B.

                      • -
                      • lb (torch.Tensor) –

                      • -
                      • ub (torch.Tensor) –

                      • -
                      • inducing_point_method (InducingPointAllocator) –

                      • -
                      • dim (int, optional) –

                      • +
                      • dim (int) –

                      • +
                      • stim_dim (int) –

                      • +
                      • mean_module (Optional[gpytorch.means.Mean]) –

                      • +
                      • covar_module (gpytorch.kernels.Kernel, optional) –

                      @@ -2016,17 +1867,15 @@

                      Submodules
                      Parameters:
                        -
                      • lb (torch.Tensor) – Lower bounds of the parameters.

                      • -
                      • ub (torch.Tensor) – Upper bounds of the parameters.

                      • -
                      • inducing_point_method (InducingPointAllocator) – The method to use to select the inducing points.

                      • -
                      • dim (int, optional) – The number of dimensions in the parameter space. If None, it is inferred from the size -of lb and ub. Defaults to None.

                      • +
                      • dim (int) – The number of dimensions in the parameter space.

                      • mean_module (gpytorch.means.Mean, optional) – GP mean class. Defaults to a constant with a normal prior. Defaults to None.

                      • covar_module (gpytorch.kernels.Kernel, optional) – GP covariance kernel class. Defaults to scaled RBF with a gamma prior.

                      • likelihood (gpytorch.likelihood.Likelihood, optional) – The likelihood function to use. If None defaults to Beta likelihood.

                      • -
                      • inducing_size (int, optional) – Number of inducing points. Defaults to 100.

                      • +
                      • inducing_point_method (InducingPointAllocator, optional) – The method to use for selecting inducing points. +If not set, a GreedyVarianceReduction is made.

                      • +
                      • inducing_size (int) – Number of inducing points. Defaults to 100.

                      • max_fit_time (float, optional) – The maximum amount of time, in seconds, to spend fitting the model. If None, there is no limit to the fitting time. Defaults to None.

                      • optimizer_options (Optional[Dict[str, Any]]) –

                      • @@ -2186,333 +2035,6 @@

                        Submodules -
                        -class aepsych.models.AutoAllocator(*args, **kwargs)[source]
                        -

                        Bases: BaseAllocator

                        -

                        An inducing point allocator that dynamically chooses an allocation strategy -based on the number of unique data points available.

                        -

                        Initialize the AutoAllocator with a fallback allocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • fallback_allocator (InducingPointAllocator, optional) – Allocator to use if there are -more unique points than required.

                        • -
                        • bounds (Optional[Tensor]) –

                        • -
                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Allocate inducing points by either using the unique input data directly or falling back to another allocation method if there are too many unique points.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – A tensor of shape (n, d) containing the input data.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        A (num_inducing, d)-dimensional tensor of inducing points.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the AutoAllocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the AutoAllocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
                        -
                        -
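                        Because this hunk deletes the AutoAllocator documentation, a short usage sketch of the removed API may help readers migrating old code; it follows the deleted signatures above and assumes the pre-removal `aepsych.models` exports.

```python
import torch
from aepsych.models import AutoAllocator, KMeansAllocator  # removed in this deploy

X = torch.rand(100, 2)  # (n, d) observed inputs

# AutoAllocator uses the unique inputs directly when there are few of
# them, and otherwise falls back to the supplied allocator (k-means++ here).
allocator = AutoAllocator(fallback_allocator=KMeansAllocator())
Z = allocator.allocate_inducing_points(inputs=X, num_inducing=10)
print(Z.shape)  # torch.Size([10, 2])
```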
                        -class aepsych.models.KMeansAllocator(*args, **kwargs)[source]
                        -

                        Bases: BaseAllocator

                        -

                        An inducing point allocator that uses k-means++ to allocate inducing points.

                        -

                        Initialize the KMeansAllocator.

                        -
                        -
                        Parameters:
                        -

                        bounds (Optional[Tensor]) –

                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Generates num_inducing inducing points using k-means++ initialization on the input data.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – A tensor of shape (n, d) containing the input data.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        A (num_inducing, d)-dimensional tensor of inducing points selected via k-means++.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the KMeansAllocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the KMeansAllocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
                        -
                        -
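                        Likewise, a sketch of the removed KMeansAllocator, following the deleted signatures above:

```python
import torch
from aepsych.models import KMeansAllocator  # removed in this deploy

X = torch.rand(200, 3)  # (n, d) observed inputs

# k-means++ seeding picks well-spread points from the data, which are
# used directly as the inducing point locations.
Z = KMeansAllocator().allocate_inducing_points(inputs=X, num_inducing=20)
assert Z.shape == (20, 3)
```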
                        -class aepsych.models.SobolAllocator(*args, **kwargs)[source]
                        -

                        Bases: BaseAllocator

                        -

                        An inducing point allocator that uses Sobol sequences to allocate inducing points.

                        -

                        Initialize the SobolAllocator with bounds.

                        -
                        -
                        Parameters:
                        -

                        bounds (Tensor) –

                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Generates num_inducing inducing points within the specified bounds using Sobol sampling.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – Input tensor, not required for Sobol sampling.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        A (num_inducing, d)-dimensional tensor of inducing points within the specified bounds.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        Raises:
                        -

                        ValueError – If bounds is not provided.

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the SobolAllocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the SobolAllocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
                        -
                        -
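                        A sketch of the removed SobolAllocator; note that, per the deleted docs above, it requires bounds at construction and ignores the observed inputs:

```python
import torch
from aepsych.models import SobolAllocator  # removed in this deploy

# Bounds are a (2, d) tensor: row 0 holds lower bounds, row 1 upper bounds.
bounds = torch.tensor([[0.0, 0.0], [1.0, 2.0]])
allocator = SobolAllocator(bounds=bounds)

# Quasi-random Sobol points fill the bounded box; a ValueError is raised
# if bounds were not provided.
Z = allocator.allocate_inducing_points(num_inducing=16)
```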
                        -class aepsych.models.DummyAllocator(*args, **kwargs)[source]
                        -

                        Bases: BaseAllocator

                        -

                        Initialize the DummyAllocator with bounds.

                        -
                        -
                        Parameters:
                        -

                        bounds (torch.Tensor) – Bounds for allocating points. Should be of shape (2, d).

                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Allocate inducing points by returning zeros of the appropriate shape.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – Input tensor, not required for DummyAllocator.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        A (num_inducing, d)-dimensional tensor of zeros.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the DummyAllocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the DummyAllocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
                        -
                        -
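                        The removed DummyAllocator is a stub that returns zeros, which the deleted docs above imply is mainly useful as a placeholder; a sketch:

```python
import torch
from aepsych.models import DummyAllocator  # removed in this deploy

# Returns (num_inducing, d) zeros, with d taken from the (2, d) bounds.
allocator = DummyAllocator(bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]]))
Z = allocator.allocate_inducing_points(num_inducing=5)
assert torch.all(Z == 0)
```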
                        -class aepsych.models.FixedAllocator(*args, **kwargs)[source]
                        -

                        Bases: BaseAllocator

                        -

                        Initialize the FixedAllocator with inducing points and bounds.

                        -
                        -
                        Parameters:
                        -
                          -
                        • points (torch.Tensor) – Inducing points to use.

                        • -
                        • bounds (torch.Tensor, optional) – Bounds for allocating points. Should be of shape (2, d).

                        • -
                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Allocate inducing points by returning the fixed inducing points.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – Input tensor, not required for FixedAllocator.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        The fixed inducing points.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the FixedAllocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the FixedAllocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
                        -
                        -
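                        A sketch of the removed FixedAllocator, which pins the inducing points to user-chosen locations regardless of the observed data:

```python
import torch
from aepsych.models import FixedAllocator  # removed in this deploy

grid = torch.tensor([[0.25, 0.25], [0.75, 0.75]])  # hand-picked points
allocator = FixedAllocator(points=grid)

# The observed inputs are ignored; the fixed points come back verbatim.
Z = allocator.allocate_inducing_points(num_inducing=2)
assert torch.equal(Z, grid)
```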
                        -class aepsych.models.GreedyVarianceReduction(*args, **kwargs)[source]
                        -

                        Bases: GreedyVarianceReduction, ConfigurableMixin

                        -

                        Initialize the GreedyVarianceReduction with bounds.

                        -
                        -
                        Parameters:
                        -

                        bounds (torch.Tensor, optional) – Bounds for allocating points. Should be of shape (2, d).

                        -
                        -
                        -
                        -
                        -allocate_inducing_points(inputs=None, covar_module=None, num_inducing=10, input_batch_shape=torch.Size([]))[source]
                        -

                        Allocate inducing points using the GreedyVarianceReduction strategy.

                        -
                        -
                        Parameters:
                        -
                          -
                        • inputs (torch.Tensor) – Input tensor, not required for GreedyVarianceReduction.

                        • -
                        • covar_module (torch.nn.Module, optional) – Kernel covariance module; included for API compatibility, but not used here.

                        • -
                        • num_inducing (int, optional) – The number of inducing points to generate. Defaults to 10.

                        • -
                        • input_batch_shape (torch.Size, optional) – Batch shape, defaults to an empty size; included for API compatibility, but not used here.

                        • -
                        -
                        -
                        Returns:
                        -

                        The allocated inducing points.

                        -
                        -
                        Return type:
                        -

                        torch.Tensor

                        -
                        -
                        -
                        -
                        -
                        -classmethod get_config_options(config, name=None, options=None)[source]
                        -

                        Get configuration options for the GreedyVarianceReduction allocator.

                        -
                        -
                        Parameters:
                        -
                          -
                        • config (Config) – Configuration object.

                        • -
                        • name (str, optional) – Name of the allocator, defaults to None.

                        • -
                        • options (Dict[str, Any], optional) – Additional options, defaults to None.

                        • -
                        -
                        -
                        Returns:
                        -

                        Configuration options for the GreedyVarianceReduction allocator.

                        -
                        -
                        Return type:
                        -

                        Dict[str, Any]

                        -
                        -
                        -
                        -
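                        GreedyVarianceReduction survives this deploy as the default allocator (see the model parameter list earlier), even though its standalone docs are deleted here. A sketch of direct use; passing the model's kernel is an assumption on our part, since the underlying BoTorch allocator selects points by posterior-variance reduction under a kernel:

```python
import torch
from gpytorch.kernels import RBFKernel, ScaleKernel
from aepsych.models import GreedyVarianceReduction

X = torch.rand(100, 2)  # (n, d) observed inputs

allocator = GreedyVarianceReduction()
# Greedily choose the points that most reduce posterior variance.
Z = allocator.allocate_inducing_points(
    inputs=X,
    covar_module=ScaleKernel(RBFKernel(ard_num_dims=2)),
    num_inducing=10,
)
```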
                        diff --git a/api/utils.html b/api/utils.html
                        index 04095ce5f..e12c1347c 100644
                        --- a/api/utils.html
                        +++ b/api/utils.html
                        @@ -246,6 +246,24 @@

                        aepsych.utils +
                        +aepsych.utils.get_dims(config)[source]
                        +

                        Return the number of dimensions in the parameter space. This accounts for any transforms that may modify the parameter space for the model (e.g., Fixed parameters will not be included).

                        +
                        +
                        Parameters:
                        +

                        config (Config) – The config to look for the number of dimensions.

                        +
                        +
                        Returns:
                        +

                        The number of dimensions in the search space.

                        +
                        +
                        Return type:
                        +

                        int

                        +
                        +
                        +
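                        A hedged sketch of calling the new get_dims helper. The ini keys below (par_type, lower_bound, value) follow AEPsych's config format as we understand it; the exact spelling for fixed parameters is an assumption:

```python
from aepsych.config import Config
from aepsych.utils import get_dims

# Two named parameters; `contrast` is fixed, so the Fixed transform drops
# it from the model's parameter space and get_dims should return 1.
config_str = """
[common]
parnames = [intensity, contrast]

[intensity]
par_type = continuous
lower_bound = 0
upper_bound = 1

[contrast]
par_type = fixed
value = 0.5
"""
config = Config(config_str=config_str)
print(get_dims(config))  # 1, assuming the keys above are correct
```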
                        diff --git a/api/utils/index.html b/api/utils/index.html
                        index 04095ce5f..e12c1347c 100644
                        --- a/api/utils/index.html
                        +++ b/api/utils/index.html
                        @@ -246,6 +246,24 @@

                        aepsych.utils +
                        +aepsych.utils.get_dims(config)[source]
                        +

                        Return the number of dimensions in the parameter space. This accounts for any transforms that may modify the parameter space for the model (e.g., Fixed parameters will not be included).

                        +
                        +
                        Parameters:
                        +

                        config (Config) – The config to look for the number of dimensions.

                        +
                        +
                        Returns:
                        +

                        The number of dimensions in the search space.

                        +
                        +
                        Return type:
                        +

                        int

                        +
                        +
                        +
                        diff --git a/demos/ParticleEffectDemo.html b/demos/ParticleEffectDemo.html
                        index 4a20cf744..2d61a5cbf 100644
                        --- a/demos/ParticleEffectDemo.html
                        +++ b/demos/ParticleEffectDemo.html
                        @@ -64,7 +64,7 @@
                        -
                        +

                        Particle Effect Demo