From bf2e11d34b441d6e58eed30e4deb9e23b9681359 Mon Sep 17 00:00:00 2001 From: t-minus Date: Thu, 23 May 2024 22:15:08 +0800 Subject: [PATCH 1/8] Review: Add warning if users pass only optimizer_kwargs but not optimizer --- nbs/common.base_model.ipynb | 6 +++++- nbs/core.ipynb | 30 +++++++++++++++++++++++++++- neuralforecast/common/_base_model.py | 6 +++++- 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/nbs/common.base_model.ipynb b/nbs/common.base_model.ipynb index 384edab5b..4f52c7214 100644 --- a/nbs/common.base_model.ipynb +++ b/nbs/common.base_model.ipynb @@ -153,7 +153,7 @@ " if optimizer is not None and not issubclass(optimizer, torch.optim.Optimizer):\n", " raise TypeError(\"optimizer is not a valid subclass of torch.optim.Optimizer\")\n", " self.optimizer = optimizer\n", - " self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs else {}\n", + " self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {}\n", "\n", " # Variables\n", " self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else []\n", @@ -383,6 +383,10 @@ " optimizer_kwargs['lr'] = self.learning_rate\n", " optimizer = self.optimizer(params=self.parameters(), **optimizer_kwargs)\n", " else:\n", + " if self.optimizer_kwargs:\n", + " warnings.warn(\n", + " \"ignoring optimizer_kwargs as the optimizer is not specified\"\n", + " ) \n", " optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n", " scheduler = {\n", " 'scheduler': torch.optim.lr_scheduler.StepLR(\n", diff --git a/nbs/core.ipynb b/nbs/core.ipynb index 119964f66..f4a925697 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -2847,7 +2847,7 @@ "outputs": [], "source": [ "#| hide\n", - "# test that if we pass in \"lr\" parameter, we expect warning and it ignores the passed in 'lr' parameter\n", + "# test that if we pass \"lr\" parameter, we expect warning and it ignores the passed in 'lr' parameter\n", "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", "\n", "for nf_model in [NHITS, RNN, StemGNN]:\n", @@ -2867,6 +2867,34 @@ " nf.fit(AirPassengersPanel_train)\n", " assert any(\"ignoring learning rate passed in optimizer_kwargs, using the model's learning rate\" in str(w.message) for w in issued_warnings)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ddeec474", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# test that if we pass \"optimizer_kwargs\" but not \"optimizer\", we expect a warning\n", + "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", + "\n", + "for nf_model in [NHITS, RNN, StemGNN]:\n", + " params = {\n", + " \"h\": 12, \n", + " \"input_size\": 24,\n", + " \"max_steps\": 1,\n", + " \"optimizer_kwargs\": {\"lr\": 0.8, \"rho\": 0.45}\n", + " }\n", + " if nf_model.__name__ == \"StemGNN\":\n", + " params.update({\"n_series\": 2})\n", + " models = [nf_model(**params)]\n", + " nf = NeuralForecast(models=models, freq='M')\n", + " with warnings.catch_warnings(record=True) as issued_warnings:\n", + " warnings.simplefilter('always', UserWarning)\n", + " nf.fit(AirPassengersPanel_train)\n", + " assert any(\"ignoring optimizer_kwargs as the optimizer is not specified\" in str(w.message) for w in issued_warnings)" + ] } ], "metadata": { diff --git a/neuralforecast/common/_base_model.py b/neuralforecast/common/_base_model.py index f2ce18d8e..258e62adb 100644 --- a/neuralforecast/common/_base_model.py 
+++ b/neuralforecast/common/_base_model.py @@ -106,7 +106,7 @@ def __init__( "optimizer is not a valid subclass of torch.optim.Optimizer" ) self.optimizer = optimizer - self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs else {} + self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {} # Variables self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else [] @@ -354,6 +354,10 @@ def configure_optimizers(self): optimizer_kwargs["lr"] = self.learning_rate optimizer = self.optimizer(params=self.parameters(), **optimizer_kwargs) else: + if self.optimizer_kwargs: + warnings.warn( + "ignoring optimizer_kwargs as the optimizer is not specified" + ) optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) scheduler = { "scheduler": torch.optim.lr_scheduler.StepLR( From 7d837992cd2f5270a9a9f9e7151393d2069ccf85 Mon Sep 17 00:00:00 2001 From: t-minus Date: Fri, 24 May 2024 00:54:24 +0800 Subject: [PATCH 2/8] option to modify configure_optimizers --- nbs/common.base_model.ipynb | 54 +++++++++++++++++++++++++-- nbs/core.ipynb | 56 ++++++++++++++++++++++++++++ neuralforecast/common/_base_model.py | 50 +++++++++++++++++++++++++ 3 files changed, 156 insertions(+), 4 deletions(-) diff --git a/nbs/common.base_model.ipynb b/nbs/common.base_model.ipynb index 4f52c7214..c01dd5edc 100644 --- a/nbs/common.base_model.ipynb +++ b/nbs/common.base_model.ipynb @@ -155,6 +155,9 @@ " self.optimizer = optimizer\n", " self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {}\n", "\n", + " # customized by set_configure_optimizers()\n", + " self.config_optimizers = None\n", + "\n", " # Variables\n", " self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else []\n", " self.hist_exog_list = list(hist_exog_list) if hist_exog_list is not None else []\n", @@ -374,6 +377,9 @@ " random.seed(self.random_seed)\n", "\n", " def configure_optimizers(self):\n", + " if self.config_optimizers is not None:\n", + " return self.config_optimizers\n", + " \n", " if self.optimizer:\n", " optimizer_signature = inspect.signature(self.optimizer)\n", " optimizer_kwargs = deepcopy(self.optimizer_kwargs)\n", @@ -389,13 +395,53 @@ " ) \n", " optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n", " scheduler = {\n", - " 'scheduler': torch.optim.lr_scheduler.StepLR(\n", + " \"scheduler\": torch.optim.lr_scheduler.StepLR(\n", " optimizer=optimizer, step_size=self.lr_decay_steps, gamma=0.5\n", " ),\n", - " 'frequency': 1,\n", - " 'interval': 'step',\n", + " \"frequency\": 1,\n", + " \"interval\": \"step\",\n", + " }\n", + " return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler}\n", + " \n", + " def set_configure_optimizers(\n", + " self, \n", + " optimizer=None,\n", + " scheduler=None,\n", + " interval='step',\n", + " frequency=1,\n", + " monitor='val_loss',\n", + " strict=True,\n", + " name=None\n", + " ):\n", + " \"\"\"Helper function to customize the lr_scheduler_config as detailed in \n", + " https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#configure-optimizers\n", + "\n", + " Calling set_configure_optimizers() with valid `optimizer`, `scheduler` shall modify the returned \n", + " dictionary of key='optimizer', key='lr_scheduler' in configure_optimizers().\n", + " Note that the default choice of `interval` in set_configure_optiizers() is 'step',\n", + " which differs from the choice of 'epoch' used in lightning_module. 
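For reference, a minimal usage sketch of the helper introduced here, mirroring the test added to nbs/core.ipynb in this patch; NHITS, Adadelta (rho=0.45) and ConstantLR (factor=0.78) are taken from that test, and the training data frame is left out as a placeholder:

    import torch
    from neuralforecast import NeuralForecast
    from neuralforecast.models import NHITS

    # Build the model first so its parameters can be handed to the optimizer.
    model = NHITS(h=12, input_size=24, max_steps=2)
    optimizer = torch.optim.Adadelta(params=model.parameters(), rho=0.45)
    scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer=optimizer, factor=0.78)

    # Replace the default Adam/StepLR pair returned by configure_optimizers().
    model.set_configure_optimizers(optimizer=optimizer, scheduler=scheduler)

    nf = NeuralForecast(models=[model], freq='M')
    # nf.fit(train_df); nf.predict()  # train_df is a placeholder for the user's panel data

Calling set_configure_optimizers() with optimizer=None or scheduler=None falls back to the default behaviour, as the else branch below makes explicit.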
\n", + " \"\"\"\n", + " lr_scheduler_config = {\n", + " 'interval': interval,\n", + " 'frequency': frequency,\n", + " 'monitor': monitor,\n", + " 'strict': strict,\n", + " 'name': name,\n", " }\n", - " return {'optimizer': optimizer, 'lr_scheduler': scheduler}\n", + " if scheduler is not None and optimizer is not None:\n", + " if not isinstance(scheduler, torch.optim.lr_scheduler.LRScheduler):\n", + " raise TypeError(\"scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler\")\n", + " if not isinstance(optimizer, torch.optim.Optimizer):\n", + " raise TypeError(\"optimizer is not a valid instance of torch.optim.Optimizer\") \n", + " \n", + " lr_scheduler_config[\"scheduler\"] = scheduler\n", + " self.config_optimizers = {\n", + " 'optimizer': optimizer,\n", + " 'lr_scheduler': lr_scheduler_config,\n", + " }\n", + " else:\n", + " # falls back to default option as specified in configure_optimizers()\n", + " self.config_optimizers = None\n", "\n", " def get_test_size(self):\n", " return self.test_size\n", diff --git a/nbs/core.ipynb b/nbs/core.ipynb index f4a925697..6329c5e94 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -2895,6 +2895,62 @@ " nf.fit(AirPassengersPanel_train)\n", " assert any(\"ignoring optimizer_kwargs as the optimizer is not specified\" in str(w.message) for w in issued_warnings)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fef9925d-f80c-4851-ba35-4fd6e20162db", + "metadata": {}, + "outputs": [], + "source": [ + "#| hide\n", + "# test customized lr_scheduler behavior such that the user defined lr_scheduler result should differ from default\n", + "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", + "\n", + "for nf_model in [NHITS, RNN, StemGNN]:\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 2}\n", + " if nf_model.__name__ == \"StemGNN\":\n", + " params.update({\"n_series\": 2})\n", + " models = [nf_model(**params)]\n", + " nf = NeuralForecast(models=models, freq='M')\n", + " nf.fit(AirPassengersPanel_train)\n", + " default_predict = nf.predict()\n", + " mean = default_predict.loc[:, nf_model.__name__].mean()\n", + "\n", + " # calling set_configure_optimizers() shall modify the default behavior of configure_optimizers()\n", + " optimizer = torch.optim.Adadelta(params=models[0].parameters(), rho=0.45)\n", + " scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer=optimizer, factor=0.78)\n", + " models[0].set_configure_optimizers(\n", + " optimizer=optimizer,\n", + " scheduler=scheduler,\n", + " )\n", + " nf2 = NeuralForecast(models=models, freq='M')\n", + " nf2.fit(AirPassengersPanel_train)\n", + " customized_predict = nf2.predict()\n", + " mean2 = customized_predict.loc[:, nf_model.__name__].mean()\n", + " assert mean2 != mean\n", + "\n", + " # test that frequency configured has effect on optimization behavior\n", + " models[0].set_configure_optimizers(\n", + " optimizer=optimizer,\n", + " scheduler=scheduler,\n", + " frequency=2,\n", + " )\n", + " nf3 = NeuralForecast(models=models, freq='M')\n", + " nf3.fit(AirPassengersPanel_train)\n", + " customized_predict3 = nf3.predict()\n", + " mean3 = customized_predict3.loc[:, nf_model.__name__].mean()\n", + " assert mean3 != mean\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1e9d282-dd4f-4268-8651-4d13114f8240", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/neuralforecast/common/_base_model.py 
b/neuralforecast/common/_base_model.py index 258e62adb..74a394f8f 100644 --- a/neuralforecast/common/_base_model.py +++ b/neuralforecast/common/_base_model.py @@ -108,6 +108,9 @@ def __init__( self.optimizer = optimizer self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {} + # customized by set_configure_optimizers() + self.config_optimizers = None + # Variables self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else [] self.hist_exog_list = list(hist_exog_list) if hist_exog_list is not None else [] @@ -343,6 +346,9 @@ def on_fit_start(self): random.seed(self.random_seed) def configure_optimizers(self): + if self.config_optimizers is not None: + return self.config_optimizers + if self.optimizer: optimizer_signature = inspect.signature(self.optimizer) optimizer_kwargs = deepcopy(self.optimizer_kwargs) @@ -368,6 +374,50 @@ def configure_optimizers(self): } return {"optimizer": optimizer, "lr_scheduler": scheduler} + def set_configure_optimizers( + self, + optimizer=None, + scheduler=None, + interval="step", + frequency=1, + monitor="val_loss", + strict=True, + name=None, + ): + """Helper function to customize the lr_scheduler_config as detailed in + https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#configure-optimizers + + Calling set_configure_optimizers() with valid `optimizer`, `scheduler` shall modify the returned + dictionary of key='optimizer', key='lr_scheduler' in configure_optimizers(). + Note that the default choice of `interval` in set_configure_optiizers() is 'step', + which differs from the choice of 'epoch' used in lightning_module. + """ + lr_scheduler_config = { + "interval": interval, + "frequency": frequency, + "monitor": monitor, + "strict": strict, + "name": name, + } + if scheduler is not None and optimizer is not None: + if not isinstance(scheduler, torch.optim.lr_scheduler.LRScheduler): + raise TypeError( + "scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler" + ) + if not isinstance(optimizer, torch.optim.Optimizer): + raise TypeError( + "optimizer is not a valid instance of torch.optim.Optimizer" + ) + + lr_scheduler_config["scheduler"] = scheduler + self.config_optimizers = { + "optimizer": optimizer, + "lr_scheduler": lr_scheduler_config, + } + else: + # falls back to default option as specified in configure_optimizers() + self.config_optimizers = None + def get_test_size(self): return self.test_size From 3539eb3a8f397789145367f0d20544266329c860 Mon Sep 17 00:00:00 2001 From: t-minus Date: Fri, 2 Aug 2024 17:55:26 +0000 Subject: [PATCH 3/8] Add example on ReduceLROnPlateau --- nbs/core.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nbs/core.ipynb b/nbs/core.ipynb index e1243fe9a..d6e57dec8 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -3193,13 +3193,13 @@ " models[0].set_configure_optimizers(\n", " optimizer=optimizer,\n", " scheduler=scheduler,\n", - " monitor=\"val_loss\",\n", + " monitor=\"train_loss\",\n", " )\n", - " nf3 = NeuralForecast(models=models, freq='M')\n", - " nf3.fit(AirPassengersPanel_train)\n", - " customized_predict3 = nf3.predict()\n", - " mean3 = customized_predict3.loc[:, nf_model.__name__].mean()\n", - " assert mean3 != mean \n", + " nf4 = NeuralForecast(models=models, freq='M')\n", + " nf4.fit(AirPassengersPanel_train)\n", + " customized_predict4 = nf4.predict()\n", + " mean4 = customized_predict4.loc[:, nf_model.__name__].mean()\n", + " assert mean4 != mean \n", "\n", "\n", "\n" From 
2e3d7c6f60bc30656373fc6b7f41b0cb565986cc Mon Sep 17 00:00:00 2001 From: t-minus Date: Wed, 11 Dec 2024 17:43:34 +0000 Subject: [PATCH 4/8] Remove old interface and deprecate the arguments nbdev_clean --clear_all remove unnecessary changes --- nbs/common.base_model.ipynb | 59 ++----- nbs/common.base_multivariate.ipynb | 10 +- nbs/common.base_recurrent.ipynb | 8 - nbs/common.base_windows.ipynb | 8 - nbs/core.ipynb | 178 +++----------------- nbs/models.autoformer.ipynb | 12 -- nbs/models.bitcn.ipynb | 12 -- nbs/models.deepar.ipynb | 12 -- nbs/models.deepnpts.ipynb | 12 -- nbs/models.dilated_rnn.ipynb | 12 -- nbs/models.dlinear.ipynb | 12 -- nbs/models.fedformer.ipynb | 12 -- nbs/models.gru.ipynb | 12 -- nbs/models.informer.ipynb | 12 -- nbs/models.itransformer.ipynb | 12 -- nbs/models.kan.ipynb | 6 - nbs/models.lstm.ipynb | 12 -- nbs/models.mlp.ipynb | 12 -- nbs/models.mlpmultivariate.ipynb | 12 -- nbs/models.nbeats.ipynb | 12 -- nbs/models.nbeatsx.ipynb | 12 -- nbs/models.nhits.ipynb | 12 -- nbs/models.nlinear.ipynb | 12 -- nbs/models.patchtst.ipynb | 12 -- nbs/models.rmok.ipynb | 12 -- nbs/models.rnn.ipynb | 12 -- nbs/models.softs.ipynb | 12 -- nbs/models.stemgnn.ipynb | 12 -- nbs/models.tcn.ipynb | 12 -- nbs/models.tft.ipynb | 12 -- nbs/models.tide.ipynb | 12 -- nbs/models.timellm.ipynb | 12 -- nbs/models.timemixer.ipynb | 12 -- nbs/models.timesnet.ipynb | 16 +- nbs/models.tsmixer.ipynb | 12 -- nbs/models.tsmixerx.ipynb | 12 -- nbs/models.vanillatransformer.ipynb | 12 -- neuralforecast/common/_base_model.py | 73 ++------ neuralforecast/common/_base_multivariate.py | 8 - neuralforecast/common/_base_recurrent.py | 8 - neuralforecast/common/_base_windows.py | 8 - neuralforecast/models/autoformer.py | 12 -- neuralforecast/models/bitcn.py | 12 -- neuralforecast/models/deepar.py | 12 -- neuralforecast/models/deepnpts.py | 12 -- neuralforecast/models/dilated_rnn.py | 12 -- neuralforecast/models/dlinear.py | 12 -- neuralforecast/models/fedformer.py | 12 -- neuralforecast/models/gru.py | 12 -- neuralforecast/models/informer.py | 12 -- neuralforecast/models/itransformer.py | 12 -- neuralforecast/models/kan.py | 6 - neuralforecast/models/lstm.py | 12 -- neuralforecast/models/mlp.py | 12 -- neuralforecast/models/mlpmultivariate.py | 12 -- neuralforecast/models/nbeats.py | 12 -- neuralforecast/models/nbeatsx.py | 12 -- neuralforecast/models/nhits.py | 12 -- neuralforecast/models/nlinear.py | 12 -- neuralforecast/models/patchtst.py | 12 -- neuralforecast/models/rmok.py | 12 -- neuralforecast/models/rnn.py | 12 -- neuralforecast/models/softs.py | 12 -- neuralforecast/models/stemgnn.py | 12 -- neuralforecast/models/tcn.py | 12 -- neuralforecast/models/tft.py | 12 -- neuralforecast/models/tide.py | 12 -- neuralforecast/models/timellm.py | 12 -- neuralforecast/models/timemixer.py | 12 -- neuralforecast/models/timesnet.py | 14 -- neuralforecast/models/tsmixer.py | 12 -- neuralforecast/models/tsmixerx.py | 12 -- neuralforecast/models/vanillatransformer.py | 12 -- 73 files changed, 47 insertions(+), 1075 deletions(-) diff --git a/nbs/common.base_model.ipynb b/nbs/common.base_model.ipynb index 444074b98..bc8c00f23 100644 --- a/nbs/common.base_model.ipynb +++ b/nbs/common.base_model.ipynb @@ -34,7 +34,6 @@ "import random\n", "import warnings\n", "from contextlib import contextmanager\n", - "from copy import deepcopy\n", "from dataclasses import dataclass\n", "\n", "import fsspec\n", @@ -121,10 +120,6 @@ " random_seed,\n", " loss,\n", " valid_loss,\n", - " optimizer,\n", - " optimizer_kwargs,\n", - " 
lr_scheduler,\n", - " lr_scheduler_kwargs,\n", " futr_exog_list,\n", " hist_exog_list,\n", " stat_exog_list,\n", @@ -150,18 +145,6 @@ " self.train_trajectories = []\n", " self.valid_trajectories = []\n", "\n", - " # Optimization\n", - " if optimizer is not None and not issubclass(optimizer, torch.optim.Optimizer):\n", - " raise TypeError(\"optimizer is not a valid subclass of torch.optim.Optimizer\")\n", - " self.optimizer = optimizer\n", - " self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {}\n", - "\n", - " # lr scheduler\n", - " if lr_scheduler is not None and not issubclass(lr_scheduler, torch.optim.lr_scheduler.LRScheduler):\n", - " raise TypeError(\"lr_scheduler is not a valid subclass of torch.optim.lr_scheduler.LRScheduler\")\n", - " self.lr_scheduler = lr_scheduler\n", - " self.lr_scheduler_kwargs = lr_scheduler_kwargs if lr_scheduler_kwargs is not None else {}\n", - "\n", " # customized by set_configure_optimizers()\n", " self.config_optimizers = None\n", "\n", @@ -412,41 +395,19 @@ "\n", " def configure_optimizers(self):\n", " if self.config_optimizers is not None:\n", + " # return the customized optimizer settings if specified\n", " return self.config_optimizers\n", - " \n", - " if self.optimizer:\n", - " optimizer_signature = inspect.signature(self.optimizer)\n", - " optimizer_kwargs = deepcopy(self.optimizer_kwargs)\n", - " if 'lr' in optimizer_signature.parameters:\n", - " if 'lr' in optimizer_kwargs:\n", - " warnings.warn(\"ignoring learning rate passed in optimizer_kwargs, using the model's learning rate\")\n", - " optimizer_kwargs['lr'] = self.learning_rate\n", - " optimizer = self.optimizer(params=self.parameters(), **optimizer_kwargs)\n", - " else:\n", - " if self.optimizer_kwargs:\n", - " warnings.warn(\n", - " \"ignoring optimizer_kwargs as the optimizer is not specified\"\n", - " )\n", - " optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n", " \n", - " lr_scheduler = {'frequency': 1, 'interval': 'step'}\n", - " if self.lr_scheduler:\n", - " lr_scheduler_signature = inspect.signature(self.lr_scheduler)\n", - " lr_scheduler_kwargs = deepcopy(self.lr_scheduler_kwargs)\n", - " if 'optimizer' in lr_scheduler_signature.parameters:\n", - " if 'optimizer' in lr_scheduler_kwargs:\n", - " warnings.warn(\"ignoring optimizer passed in lr_scheduler_kwargs, using the model's optimizer\")\n", - " del lr_scheduler_kwargs['optimizer']\n", - " lr_scheduler['scheduler'] = self.lr_scheduler(optimizer=optimizer, **lr_scheduler_kwargs)\n", - " else:\n", - " if self.lr_scheduler_kwargs:\n", - " warnings.warn(\n", - " \"ignoring lr_scheduler_kwargs as the lr_scheduler is not specified\"\n", - " ) \n", - " lr_scheduler['scheduler'] = torch.optim.lr_scheduler.StepLR(\n", + " # default choice\n", + " optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n", + " scheduler = {\n", + " \"scheduler\": torch.optim.lr_scheduler.StepLR(\n", " optimizer=optimizer, step_size=self.lr_decay_steps, gamma=0.5\n", - " )\n", - " return {'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n", + " ),\n", + " \"frequency\": 1,\n", + " \"interval\": \"step\",\n", + " }\n", + " return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler}\n", "\n", " def set_configure_optimizers(\n", " self, \n", diff --git a/nbs/common.base_multivariate.ipynb b/nbs/common.base_multivariate.ipynb index f1321600d..43e31a0ed 100644 --- a/nbs/common.base_multivariate.ipynb +++ b/nbs/common.base_multivariate.ipynb @@ -105,20 +105,12 @@ " 
drop_last_loader=False,\n", " random_seed=1, \n", " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", " loss=loss,\n", - " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs, \n", + " valid_loss=valid_loss, \n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", diff --git a/nbs/common.base_recurrent.ipynb b/nbs/common.base_recurrent.ipynb index 7b0ed5585..38ac09dba 100644 --- a/nbs/common.base_recurrent.ipynb +++ b/nbs/common.base_recurrent.ipynb @@ -111,20 +111,12 @@ " drop_last_loader=False,\n", " random_seed=1, \n", " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", diff --git a/nbs/common.base_windows.ipynb b/nbs/common.base_windows.ipynb index 80f12e5f5..ced5a7913 100644 --- a/nbs/common.base_windows.ipynb +++ b/nbs/common.base_windows.ipynb @@ -115,20 +115,12 @@ " drop_last_loader=False,\n", " random_seed=1,\n", " alias=None,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", " loss=loss,\n", " valid_loss=valid_loss,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", diff --git a/nbs/core.ipynb b/nbs/core.ipynb index e916ef356..3a83b52a8 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -3172,15 +3172,22 @@ " mean = default_optimizer_predict.loc[:, nf_model.__name__].mean()\n", "\n", " # using a customized optimizer\n", - " params.update({\n", - " \"optimizer\": torch.optim.Adadelta,\n", - " \"optimizer_kwargs\": {\"rho\": 0.45}, \n", - " })\n", + " optimizer = torch.optim.Adadelta(params=models2[0].parameters(), rho=0.75)\n", + " scheduler=torch.optim.lr_scheduler.StepLR(\n", + " optimizer=optimizer, step_size=10e7, gamma=0.5\n", + " )\n", + "\n", " models2 = [nf_model(**params)]\n", + " models2[0].set_configure_optimizers(\n", + " optimizer=optimizer,\n", + " scheduler=scheduler,\n", + " )\n", + "\n", " nf2 = NeuralForecast(models=models2, freq='M')\n", " nf2.fit(AirPassengersPanel_train)\n", " customized_optimizer_predict = nf2.predict()\n", " mean2 = customized_optimizer_predict.loc[:, nf_model.__name__].mean()\n", + "\n", " assert mean2 != mean" ] }, @@ -3194,100 +3201,18 @@ "#| hide\n", "# test that if the user-defined optimizer is not a subclass of torch.optim.optimizer, failed with exception\n", "# tests cover different types of base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - 
"test_fail(lambda: NHITS(h=12, input_size=24, max_steps=10, optimizer=torch.nn.Module), contains=\"optimizer is not a valid subclass of torch.optim.Optimizer\")\n", - "test_fail(lambda: RNN(h=12, input_size=24, max_steps=10, optimizer=torch.nn.Module), contains=\"optimizer is not a valid subclass of torch.optim.Optimizer\")\n", - "test_fail(lambda: StemGNN(h=12, input_size=24, max_steps=10, n_series=2, optimizer=torch.nn.Module), contains=\"optimizer is not a valid subclass of torch.optim.Optimizer\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d908240f", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if we pass \"lr\" parameter, we expect warning and it ignores the passed in 'lr' parameter\n", - "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", "\n", - "for nf_model in [NHITS, RNN, StemGNN]:\n", - " params = {\n", - " \"h\": 12, \n", - " \"input_size\": 24, \n", - " \"max_steps\": 1, \n", - " \"optimizer\": torch.optim.Adadelta, \n", - " \"optimizer_kwargs\": {\"lr\": 0.8, \"rho\": 0.45}\n", - " }\n", + "for model_name in [NHITS, RNN, StemGNN]:\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 10}\n", " if nf_model.__name__ == \"StemGNN\":\n", " params.update({\"n_series\": 2})\n", - " models = [nf_model(**params)]\n", - " nf = NeuralForecast(models=models, freq='M')\n", - " with warnings.catch_warnings(record=True) as issued_warnings:\n", - " warnings.simplefilter('always', UserWarning)\n", - " nf.fit(AirPassengersPanel_train)\n", - " assert any(\"ignoring learning rate passed in optimizer_kwargs, using the model's learning rate\" in str(w.message) for w in issued_warnings)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c97858b5-e6a0-4353-a48f-5a5460eb2314", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if we pass \"optimizer_kwargs\" but not \"optimizer\", we expect a warning\n", - "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", "\n", - "for nf_model in [NHITS, RNN, StemGNN]:\n", - " params = {\n", - " \"h\": 12, \n", - " \"input_size\": 24, \n", - " \"max_steps\": 1,\n", - " \"optimizer_kwargs\": {\"lr\": 0.8, \"rho\": 0.45}\n", - " }\n", - " if nf_model.__name__ == \"StemGNN\":\n", - " params.update({\"n_series\": 2})\n", - " models = [nf_model(**params)]\n", - " nf = NeuralForecast(models=models, freq='M')\n", - " with warnings.catch_warnings(record=True) as issued_warnings:\n", - " warnings.simplefilter('always', UserWarning)\n", - " nf.fit(AirPassengersPanel_train)\n", - " assert any(\"ignoring optimizer_kwargs as the optimizer is not specified\" in str(w.message) for w in issued_warnings)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24142322", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test customized lr_scheduler behavior such that the user defined lr_scheduler result should differ from default\n", - "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - "\n", - "for nf_model in [NHITS, RNN, StemGNN]:\n", - " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 1}\n", - " if nf_model.__name__ == \"StemGNN\":\n", - " params.update({\"n_series\": 2})\n", - " models = [nf_model(**params)]\n", - " nf = NeuralForecast(models=models, freq='M')\n", - " 
nf.fit(AirPassengersPanel_train)\n", - " default_optimizer_predict = nf.predict()\n", - " mean = default_optimizer_predict.loc[:, nf_model.__name__].mean()\n", - "\n", - " # using a customized lr_scheduler, default is StepLR\n", - " params.update({\n", - " \"lr_scheduler\": torch.optim.lr_scheduler.ConstantLR,\n", - " \"lr_scheduler_kwargs\": {\"factor\": 0.78}, \n", - " })\n", - " models2 = [nf_model(**params)]\n", - " nf2 = NeuralForecast(models=models2, freq='M')\n", - " nf2.fit(AirPassengersPanel_train)\n", - " customized_optimizer_predict = nf2.predict()\n", - " mean2 = customized_optimizer_predict.loc[:, nf_model.__name__].mean()\n", - " assert mean2 != mean" + " model = model_name(**params) \n", + " optimizer = torch.nn.Module()\n", + " scheduler = torch.optim.lr_scheduler.StepLR(\n", + " optimizer=torch.optim.Adam(model.parameters()), step_size=10e7, gamma=0.5\n", + " ) \n", + " test_fail(lambda: model.set_configure_optimizers(optimizer=optimizer, scheduler=scheduler), contains=\"optimizer is not a valid instance of torch.optim.Optimizer\")\n" ] }, { @@ -3298,68 +3223,16 @@ "outputs": [], "source": [ "#| hide\n", - "# test that if the user-defined lr_scheduler is not a subclass of torch.optim.lr_scheduler, failed with exception\n", + "# test that if the user-defined scheduler is not a subclass of torch.optim.lr_scheduler, failed with exception\n", "# tests cover different types of base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - "test_fail(lambda: NHITS(h=12, input_size=24, max_steps=10, lr_scheduler=torch.nn.Module), contains=\"lr_scheduler is not a valid subclass of torch.optim.lr_scheduler.LRScheduler\")\n", - "test_fail(lambda: RNN(h=12, input_size=24, max_steps=10, lr_scheduler=torch.nn.Module), contains=\"lr_scheduler is not a valid subclass of torch.optim.lr_scheduler.LRScheduler\")\n", - "test_fail(lambda: StemGNN(h=12, input_size=24, max_steps=10, n_series=2, lr_scheduler=torch.nn.Module), contains=\"lr_scheduler is not a valid subclass of torch.optim.lr_scheduler.LRScheduler\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b1d8bebb", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if we pass in \"optimizer\" parameter, we expect warning and it ignores them\n", - "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - "\n", - "for nf_model in [NHITS, RNN, StemGNN]:\n", - " params = {\n", - " \"h\": 12, \n", - " \"input_size\": 24, \n", - " \"max_steps\": 1, \n", - " \"lr_scheduler\": torch.optim.lr_scheduler.ConstantLR, \n", - " \"lr_scheduler_kwargs\": {\"optimizer\": torch.optim.Adadelta, \"factor\": 0.22}\n", - " }\n", - " if nf_model.__name__ == \"StemGNN\":\n", - " params.update({\"n_series\": 2})\n", - " models = [nf_model(**params)]\n", - " nf = NeuralForecast(models=models, freq='M')\n", - " with warnings.catch_warnings(record=True) as issued_warnings:\n", - " warnings.simplefilter('always', UserWarning)\n", - " nf.fit(AirPassengersPanel_train)\n", - " assert any(\"ignoring optimizer passed in lr_scheduler_kwargs, using the model's optimizer\" in str(w.message) for w in issued_warnings)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "06febece", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if we pass in \"lr_scheduler_kwargs\" but not \"lr_scheduler\", we expect a warning\n", - "# tests consider models implemented using different base classes such as 
BaseWindows, BaseRecurrent, BaseMultivariate\n", "\n", - "for nf_model in [NHITS, RNN, StemGNN]:\n", - " params = {\n", - " \"h\": 12, \n", - " \"input_size\": 24, \n", - " \"max_steps\": 1,\n", - " \"lr_scheduler_kwargs\": {\"optimizer\": torch.optim.Adadelta, \"factor\": 0.22}\n", - " }\n", + "for model_name in [NHITS, RNN, StemGNN]:\n", + " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 10}\n", " if nf_model.__name__ == \"StemGNN\":\n", " params.update({\"n_series\": 2})\n", - " models = [nf_model(**params)]\n", - " nf = NeuralForecast(models=models, freq='M')\n", - " with warnings.catch_warnings(record=True) as issued_warnings:\n", - " warnings.simplefilter('always', UserWarning)\n", - " nf.fit(AirPassengersPanel_train)\n", - " assert any(\"ignoring lr_scheduler_kwargs as the lr_scheduler is not specified\" in str(w.message) for w in issued_warnings)\n" + " model = model_name(**params)\n", + " optimizer = torch.optim.Adam(model.parameters())\n", + " test_fail(lambda: model.set_configure_optimizers(optimizer=optimizer, scheduler=torch.nn.Module), contains=\"scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler\")" ] }, { @@ -3493,7 +3366,6 @@ " models[0].set_configure_optimizers(\n", " optimizer=optimizer,\n", " scheduler=scheduler,\n", - "\n", " )\n", " nf2 = NeuralForecast(models=models, freq='M')\n", " nf2.fit(AirPassengersPanel_train)\n", diff --git a/nbs/models.autoformer.ipynb b/nbs/models.autoformer.ipynb index 9c6567f2e..6db6deb68 100644 --- a/nbs/models.autoformer.ipynb +++ b/nbs/models.autoformer.ipynb @@ -458,10 +458,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -508,10 +504,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super(Autoformer, self).__init__(h=h,\n", @@ -537,10 +529,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.bitcn.ipynb b/nbs/models.bitcn.ipynb index cd78bb194..8e9571de6 100644 --- a/nbs/models.bitcn.ipynb +++ b/nbs/models.bitcn.ipynb @@ -178,10 +178,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -221,10 +217,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super(BiTCN, self).__init__(\n", @@ -251,10 +243,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.deepar.ipynb b/nbs/models.deepar.ipynb index c25e27bf9..1f93be176 100644 --- a/nbs/models.deepar.ipynb +++ b/nbs/models.deepar.ipynb @@ -183,10 +183,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -231,10 +227,6 @@ " random_seed: int = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -274,10 +266,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.deepnpts.ipynb b/nbs/models.deepnpts.ipynb index 4f5e7ee9f..da83951b5 100644 --- a/nbs/models.deepnpts.ipynb +++ b/nbs/models.deepnpts.ipynb @@ -121,10 +121,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -166,10 +162,6 @@ " random_seed: int = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -206,10 +198,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.dilated_rnn.ipynb b/nbs/models.dilated_rnn.ipynb index 4b3bd374f..7c556be3d 100644 --- a/nbs/models.dilated_rnn.ipynb +++ b/nbs/models.dilated_rnn.ipynb @@ -390,10 +390,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -430,10 +426,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(DilatedRNN, self).__init__(\n", @@ -456,10 +448,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.dlinear.ipynb b/nbs/models.dlinear.ipynb index ea1a38a43..57edcc945 100644 --- a/nbs/models.dlinear.ipynb +++ b/nbs/models.dlinear.ipynb @@ -162,10 +162,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -203,10 +199,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs=None,\n", " **trainer_kwargs):\n", " super(DLinear, self).__init__(h=h,\n", @@ -232,10 +224,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.fedformer.ipynb b/nbs/models.fedformer.ipynb index 2268c058d..47a13e205 100644 --- a/nbs/models.fedformer.ipynb +++ b/nbs/models.fedformer.ipynb @@ -451,10 +451,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -500,10 +496,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(FEDformer, self).__init__(h=h,\n", @@ -528,10 +520,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs, \n", " **trainer_kwargs)\n", " # Architecture\n", diff --git a/nbs/models.gru.ipynb b/nbs/models.gru.ipynb index 7f0608a5f..b3210e198 100644 --- a/nbs/models.gru.ipynb +++ b/nbs/models.gru.ipynb @@ -134,10 +134,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -175,10 +171,6 @@ " random_seed=1,\n", " num_workers_loader=0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(GRU, self).__init__(\n", @@ -201,10 +193,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.informer.ipynb b/nbs/models.informer.ipynb index c8e30137c..1666abc67 100644 --- a/nbs/models.informer.ipynb +++ b/nbs/models.informer.ipynb @@ -306,10 +306,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -356,10 +352,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(Informer, self).__init__(h=h,\n", @@ -385,10 +377,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.itransformer.ipynb b/nbs/models.itransformer.ipynb index 5e134cfa0..f55a1927b 100644 --- a/nbs/models.itransformer.ipynb +++ b/nbs/models.itransformer.ipynb @@ -228,10 +228,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \n", @@ -273,10 +269,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " \n", @@ -299,10 +291,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.kan.ipynb b/nbs/models.kan.ipynb index ac7cc5e2b..93aa02fa3 100644 --- a/nbs/models.kan.ipynb +++ b/nbs/models.kan.ipynb @@ -362,8 +362,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -411,8 +409,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " \n", @@ -440,8 +436,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", " dataloader_kwargs = dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.lstm.ipynb b/nbs/models.lstm.ipynb index 3eb469306..464a539bb 100644 --- a/nbs/models.lstm.ipynb +++ b/nbs/models.lstm.ipynb @@ -121,10 +121,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -161,10 +157,6 @@ " random_seed = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(LSTM, self).__init__(\n", @@ -187,10 +179,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.mlp.ipynb b/nbs/models.mlp.ipynb index 46c09406f..075dd28e1 100644 --- a/nbs/models.mlp.ipynb +++ b/nbs/models.mlp.ipynb @@ -114,10 +114,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -153,10 +149,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -184,10 +176,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.mlpmultivariate.ipynb b/nbs/models.mlpmultivariate.ipynb index 71abdfb04..b6fb8e302 100644 --- a/nbs/models.mlpmultivariate.ipynb +++ b/nbs/models.mlpmultivariate.ipynb @@ -108,10 +108,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -143,10 +139,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -170,10 +162,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.nbeats.ipynb b/nbs/models.nbeats.ipynb index 9504770d5..5d28efdd3 100644 --- a/nbs/models.nbeats.ipynb +++ b/nbs/models.nbeats.ipynb @@ -270,10 +270,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -315,10 +311,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " \n", @@ -348,10 +340,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.nbeatsx.ipynb b/nbs/models.nbeatsx.ipynb index 9952c3cf9..5db08fec5 100644 --- a/nbs/models.nbeatsx.ipynb +++ b/nbs/models.nbeatsx.ipynb @@ -414,10 +414,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -465,10 +461,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs,\n", " ):\n", @@ -502,10 +494,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.nhits.ipynb b/nbs/models.nhits.ipynb index e844f4660..9b214ce62 100644 --- a/nbs/models.nhits.ipynb +++ b/nbs/models.nhits.ipynb @@ -303,10 +303,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -354,10 +350,6 @@ " random_seed: int = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -385,10 +377,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.nlinear.ipynb b/nbs/models.nlinear.ipynb index b55d42204..1b922b883 100644 --- a/nbs/models.nlinear.ipynb +++ b/nbs/models.nlinear.ipynb @@ -102,10 +102,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -142,10 +138,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(NLinear, self).__init__(h=h,\n", @@ -171,10 +163,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.patchtst.ipynb b/nbs/models.patchtst.ipynb index 31064cc24..1088bc6d4 100644 --- a/nbs/models.patchtst.ipynb +++ b/nbs/models.patchtst.ipynb @@ -662,10 +662,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -719,10 +715,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(PatchTST, self).__init__(h=h,\n", @@ -748,10 +740,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs) \n", "\n", diff --git a/nbs/models.rmok.ipynb b/nbs/models.rmok.ipynb index 017477c13..6245d0eb7 100644 --- a/nbs/models.rmok.ipynb +++ b/nbs/models.rmok.ipynb @@ -359,10 +359,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -401,10 +397,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " \n", @@ -427,10 +419,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.rnn.ipynb b/nbs/models.rnn.ipynb index f5e1a67b9..bd856c014 100644 --- a/nbs/models.rnn.ipynb +++ b/nbs/models.rnn.ipynb @@ -125,10 +125,6 @@ " `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
\n", " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `alias`: str, optional, Custom name of the model.
\n", "\n", @@ -168,10 +164,6 @@ " random_seed=1,\n", " num_workers_loader=0,\n", " drop_last_loader=False,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " super(RNN, self).__init__(\n", @@ -194,10 +186,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.softs.ipynb b/nbs/models.softs.ipynb index 978f3c2c2..05d30886f 100644 --- a/nbs/models.softs.ipynb +++ b/nbs/models.softs.ipynb @@ -200,10 +200,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \n", @@ -243,10 +239,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " \n", @@ -269,10 +261,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.stemgnn.ipynb b/nbs/models.stemgnn.ipynb index b2222fc1c..54aad7471 100644 --- a/nbs/models.stemgnn.ipynb +++ b/nbs/models.stemgnn.ipynb @@ -204,10 +204,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -241,10 +237,6 @@ " random_seed: int = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -268,10 +260,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " # Quick fix for now, fix the model later.\n", diff --git a/nbs/models.tcn.ipynb b/nbs/models.tcn.ipynb index dee324513..25b6085de 100644 --- a/nbs/models.tcn.ipynb +++ b/nbs/models.tcn.ipynb @@ -126,10 +126,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", " \"\"\"\n", @@ -166,10 +162,6 @@ " random_seed: int = 1,\n", " num_workers_loader = 0,\n", " drop_last_loader = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " super(TCN, self).__init__(\n", @@ -192,10 +184,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs = dataloader_kwargs,\n", " **trainer_kwargs\n", " )\n", diff --git a/nbs/models.tft.ipynb b/nbs/models.tft.ipynb index bae287acf..6ded2b3bb 100644 --- a/nbs/models.tft.ipynb +++ b/nbs/models.tft.ipynb @@ -696,10 +696,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -744,10 +740,6 @@ " num_workers_loader=0,\n", " drop_last_loader=False,\n", " random_seed: int = 1,\n", - " optimizer=None,\n", - " optimizer_kwargs=None,\n", - " lr_scheduler=None,\n", - " lr_scheduler_kwargs=None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs,\n", " ):\n", @@ -776,10 +768,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs,\n", " )\n", diff --git a/nbs/models.tide.ipynb b/nbs/models.tide.ipynb index 6a16d2b2b..f635beec0 100644 --- a/nbs/models.tide.ipynb +++ b/nbs/models.tide.ipynb @@ -167,10 +167,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -216,10 +212,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -248,10 +240,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs\n", " ) \n", diff --git a/nbs/models.timellm.ipynb b/nbs/models.timellm.ipynb index 67f4a03d1..a05c33156 100755 --- a/nbs/models.timellm.ipynb +++ b/nbs/models.timellm.ipynb @@ -291,10 +291,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -348,10 +344,6 @@ " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", " random_seed: int = 1,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(TimeLLM, self).__init__(h=h,\n", @@ -376,10 +368,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.timemixer.ipynb b/nbs/models.timemixer.ipynb index 9bfdd9cc5..207d44b29 100644 --- a/nbs/models.timemixer.ipynb +++ b/nbs/models.timemixer.ipynb @@ -360,10 +360,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -410,10 +406,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " \n", @@ -436,10 +428,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " \n", diff --git a/nbs/models.timesnet.ipynb b/nbs/models.timesnet.ipynb index 37e5d46e4..98eefe2f6 100644 --- a/nbs/models.timesnet.ipynb +++ b/nbs/models.timesnet.ipynb @@ -263,12 +263,6 @@ " Workers to be used by `TimeSeriesDataLoader`.\n", " drop_last_loader : bool (default=False)\n", " If True `TimeSeriesDataLoader` drops last non-full batch.\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional (default=None)\n", - " User specified optimizer instead of the default choice (Adam).\n", - " `optimizer_kwargs`: dict, optional (defualt=None)\n", - " List of parameters used by the user specified `optimizer`.\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional (default=None)\n", " List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " **trainer_kwargs\n", @@ -314,10 +308,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None, \n", " dataloader_kwargs = None, \n", " **trainer_kwargs):\n", " super(TimesNet, self).__init__(h=h,\n", @@ -343,11 +333,7 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs, \n", - " dataloader_kwargs=dataloader_kwargs, \n", + " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.tsmixer.ipynb b/nbs/models.tsmixer.ipynb index 94a9e4125..c255c233c 100644 --- a/nbs/models.tsmixer.ipynb +++ b/nbs/models.tsmixer.ipynb @@ -250,10 +250,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -291,10 +287,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -318,10 +310,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/nbs/models.tsmixerx.ipynb b/nbs/models.tsmixerx.ipynb index cb0ba72b6..d1f220823 100644 --- a/nbs/models.tsmixerx.ipynb +++ b/nbs/models.tsmixerx.ipynb @@ -274,10 +274,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -315,10 +311,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", "\n", @@ -342,10 +334,6 @@ " random_seed=random_seed,\n", " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", " # Reversible InstanceNormalization layer\n", diff --git a/nbs/models.vanillatransformer.ipynb b/nbs/models.vanillatransformer.ipynb index b76cc9ba2..56cb5e33b 100644 --- a/nbs/models.vanillatransformer.ipynb +++ b/nbs/models.vanillatransformer.ipynb @@ -198,10 +198,6 @@ " `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
\n", " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", - " `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
\n", - " `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
\n", - " `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
\n", - " `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", @@ -245,10 +241,6 @@ " random_seed: int = 1,\n", " num_workers_loader: int = 0,\n", " drop_last_loader: bool = False,\n", - " optimizer = None,\n", - " optimizer_kwargs = None,\n", - " lr_scheduler = None,\n", - " lr_scheduler_kwargs = None,\n", " dataloader_kwargs = None,\n", " **trainer_kwargs):\n", " super(VanillaTransformer, self).__init__(h=h,\n", @@ -273,10 +265,6 @@ " num_workers_loader=num_workers_loader,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " optimizer=optimizer,\n", - " optimizer_kwargs=optimizer_kwargs,\n", - " lr_scheduler=lr_scheduler,\n", - " lr_scheduler_kwargs=lr_scheduler_kwargs,\n", " dataloader_kwargs=dataloader_kwargs,\n", " **trainer_kwargs)\n", "\n", diff --git a/neuralforecast/common/_base_model.py b/neuralforecast/common/_base_model.py index 59f7d4a14..e6e7db0b6 100644 --- a/neuralforecast/common/_base_model.py +++ b/neuralforecast/common/_base_model.py @@ -8,7 +8,6 @@ import random import warnings from contextlib import contextmanager -from copy import deepcopy from dataclasses import dataclass import fsspec @@ -72,10 +71,6 @@ def __init__( random_seed, loss, valid_loss, - optimizer, - optimizer_kwargs, - lr_scheduler, - lr_scheduler_kwargs, futr_exog_list, hist_exog_list, stat_exog_list, @@ -101,26 +96,6 @@ def __init__( self.train_trajectories = [] self.valid_trajectories = [] - # Optimization - if optimizer is not None and not issubclass(optimizer, torch.optim.Optimizer): - raise TypeError( - "optimizer is not a valid subclass of torch.optim.Optimizer" - ) - self.optimizer = optimizer - self.optimizer_kwargs = optimizer_kwargs if optimizer_kwargs is not None else {} - - # lr scheduler - if lr_scheduler is not None and not issubclass( - lr_scheduler, torch.optim.lr_scheduler.LRScheduler - ): - raise TypeError( - "lr_scheduler is not a valid subclass of torch.optim.lr_scheduler.LRScheduler" - ) - self.lr_scheduler = lr_scheduler - self.lr_scheduler_kwargs = ( - lr_scheduler_kwargs if lr_scheduler_kwargs is not None else {} - ) - # customized by set_configure_optimizers() self.config_optimizers = None @@ -389,47 +364,19 @@ def on_fit_start(self): def configure_optimizers(self): if self.config_optimizers is not None: + # return the customized optimizer settings if specified return self.config_optimizers - if self.optimizer: - optimizer_signature = inspect.signature(self.optimizer) - optimizer_kwargs = deepcopy(self.optimizer_kwargs) - if "lr" in optimizer_signature.parameters: - if "lr" in optimizer_kwargs: - warnings.warn( - "ignoring learning rate passed in optimizer_kwargs, using the model's learning rate" - ) - optimizer_kwargs["lr"] = self.learning_rate - optimizer = self.optimizer(params=self.parameters(), **optimizer_kwargs) - else: - if self.optimizer_kwargs: - warnings.warn( - "ignoring optimizer_kwargs as the optimizer is not specified" - ) - optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) - - lr_scheduler = {"frequency": 1, "interval": "step"} - if self.lr_scheduler: - lr_scheduler_signature = inspect.signature(self.lr_scheduler) - lr_scheduler_kwargs = deepcopy(self.lr_scheduler_kwargs) - if "optimizer" in lr_scheduler_signature.parameters: - if "optimizer" in lr_scheduler_kwargs: - warnings.warn( - "ignoring optimizer passed in lr_scheduler_kwargs, using the model's optimizer" - ) - del lr_scheduler_kwargs["optimizer"] - lr_scheduler["scheduler"] = self.lr_scheduler( - optimizer=optimizer, **lr_scheduler_kwargs - ) - else: - if self.lr_scheduler_kwargs: - warnings.warn( - 
"ignoring lr_scheduler_kwargs as the lr_scheduler is not specified" - ) - lr_scheduler["scheduler"] = torch.optim.lr_scheduler.StepLR( + # default choice + optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) + scheduler = { + "scheduler": torch.optim.lr_scheduler.StepLR( optimizer=optimizer, step_size=self.lr_decay_steps, gamma=0.5 - ) - return {"optimizer": optimizer, "lr_scheduler": lr_scheduler} + ), + "frequency": 1, + "interval": "step", + } + return {"optimizer": optimizer, "lr_scheduler": scheduler} def set_configure_optimizers( self, diff --git a/neuralforecast/common/_base_multivariate.py b/neuralforecast/common/_base_multivariate.py index 0fdc3b94d..5acdf75eb 100644 --- a/neuralforecast/common/_base_multivariate.py +++ b/neuralforecast/common/_base_multivariate.py @@ -50,10 +50,6 @@ def __init__( drop_last_loader=False, random_seed=1, alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -61,10 +57,6 @@ def __init__( random_seed=random_seed, loss=loss, valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, diff --git a/neuralforecast/common/_base_recurrent.py b/neuralforecast/common/_base_recurrent.py index 0479996c1..604eaddb8 100644 --- a/neuralforecast/common/_base_recurrent.py +++ b/neuralforecast/common/_base_recurrent.py @@ -50,10 +50,6 @@ def __init__( drop_last_loader=False, random_seed=1, alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -61,10 +57,6 @@ def __init__( random_seed=random_seed, loss=loss, valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, diff --git a/neuralforecast/common/_base_windows.py b/neuralforecast/common/_base_windows.py index dd4a4c869..f83936fcb 100644 --- a/neuralforecast/common/_base_windows.py +++ b/neuralforecast/common/_base_windows.py @@ -53,10 +53,6 @@ def __init__( drop_last_loader=False, random_seed=1, alias=None, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -64,10 +60,6 @@ def __init__( random_seed=random_seed, loss=loss, valid_loss=valid_loss, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, futr_exog_list=futr_exog_list, hist_exog_list=hist_exog_list, stat_exog_list=stat_exog_list, diff --git a/neuralforecast/models/autoformer.py b/neuralforecast/models/autoformer.py index 815e57bc2..ffa081907 100644 --- a/neuralforecast/models/autoformer.py +++ b/neuralforecast/models/autoformer.py @@ -442,10 +442,6 @@ class Autoformer(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -494,10 +490,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -525,10 +517,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/bitcn.py b/neuralforecast/models/bitcn.py index 53a775838..ed48fa5e0 100644 --- a/neuralforecast/models/bitcn.py +++ b/neuralforecast/models/bitcn.py @@ -116,10 +116,6 @@ class BiTCN(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -161,10 +157,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -192,10 +184,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/deepar.py b/neuralforecast/models/deepar.py index 3d2a2fd94..06e0860c2 100644 --- a/neuralforecast/models/deepar.py +++ b/neuralforecast/models/deepar.py @@ -87,10 +87,6 @@ class DeepAR(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -139,10 +135,6 @@ def __init__( random_seed: int = 1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -188,10 +180,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/deepnpts.py b/neuralforecast/models/deepnpts.py index f958e71be..8ba95a2f8 100644 --- a/neuralforecast/models/deepnpts.py +++ b/neuralforecast/models/deepnpts.py @@ -49,10 +49,6 @@ class DeepNPTS(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -96,10 +92,6 @@ def __init__( random_seed: int = 1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -142,10 +134,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/dilated_rnn.py b/neuralforecast/models/dilated_rnn.py index d56cc5f08..a56d3ed0c 100644 --- a/neuralforecast/models/dilated_rnn.py +++ b/neuralforecast/models/dilated_rnn.py @@ -317,10 +317,6 @@ class DilatedRNN(BaseRecurrent): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -359,10 +355,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -386,10 +378,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/dlinear.py b/neuralforecast/models/dlinear.py index 17965c869..c0ba3773c 100644 --- a/neuralforecast/models/dlinear.py +++ b/neuralforecast/models/dlinear.py @@ -75,10 +75,6 @@ class DLinear(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -118,10 +114,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -149,10 +141,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/fedformer.py b/neuralforecast/models/fedformer.py index 89e2fe3ef..2073fde45 100644 --- a/neuralforecast/models/fedformer.py +++ b/neuralforecast/models/fedformer.py @@ -440,10 +440,6 @@ class FEDformer(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -491,10 +487,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -521,10 +513,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/gru.py b/neuralforecast/models/gru.py index 9a6d92325..da24a52e7 100644 --- a/neuralforecast/models/gru.py +++ b/neuralforecast/models/gru.py @@ -52,10 +52,6 @@ class GRU(BaseRecurrent): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -95,10 +91,6 @@ def __init__( random_seed=1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -122,10 +114,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/informer.py b/neuralforecast/models/informer.py index 8b115cebd..82ad48f55 100644 --- a/neuralforecast/models/informer.py +++ b/neuralforecast/models/informer.py @@ -226,10 +226,6 @@ class Informer(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -278,10 +274,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -309,10 +301,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/itransformer.py b/neuralforecast/models/itransformer.py index 9e577a71d..b651ca730 100644 --- a/neuralforecast/models/itransformer.py +++ b/neuralforecast/models/itransformer.py @@ -134,10 +134,6 @@ class iTransformer(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -180,10 +176,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -208,10 +200,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/kan.py b/neuralforecast/models/kan.py index 29d7b1d00..74ea0b099 100644 --- a/neuralforecast/models/kan.py +++ b/neuralforecast/models/kan.py @@ -284,8 +284,6 @@ class KAN(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -334,8 +332,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -365,8 +361,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/lstm.py b/neuralforecast/models/lstm.py index e89db3628..2f1e832e1 100644 --- a/neuralforecast/models/lstm.py +++ b/neuralforecast/models/lstm.py @@ -50,10 +50,6 @@ class LSTM(BaseRecurrent): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -92,10 +88,6 @@ def __init__( random_seed=1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -119,10 +111,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/mlp.py b/neuralforecast/models/mlp.py index 0794ac7c3..40cc8ce31 100644 --- a/neuralforecast/models/mlp.py +++ b/neuralforecast/models/mlp.py @@ -49,10 +49,6 @@ class MLP(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -90,10 +86,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -123,10 +115,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/mlpmultivariate.py b/neuralforecast/models/mlpmultivariate.py index 7554bb44d..b25e6d2e7 100644 --- a/neuralforecast/models/mlpmultivariate.py +++ b/neuralforecast/models/mlpmultivariate.py @@ -43,10 +43,6 @@ class MLPMultivariate(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -80,10 +76,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -109,10 +101,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/nbeats.py b/neuralforecast/models/nbeats.py index 02280fb79..0957abffc 100644 --- a/neuralforecast/models/nbeats.py +++ b/neuralforecast/models/nbeats.py @@ -228,10 +228,6 @@ class NBEATS(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -275,10 +271,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -310,10 +302,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/nbeatsx.py b/neuralforecast/models/nbeatsx.py index 811392a66..4fb461db2 100644 --- a/neuralforecast/models/nbeatsx.py +++ b/neuralforecast/models/nbeatsx.py @@ -315,10 +315,6 @@ class NBEATSx(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -366,10 +362,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -404,10 +396,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/nhits.py b/neuralforecast/models/nhits.py index ce5caeaaa..1d1bb9dd1 100644 --- a/neuralforecast/models/nhits.py +++ b/neuralforecast/models/nhits.py @@ -226,10 +226,6 @@ class NHITS(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -279,10 +275,6 @@ def __init__( random_seed: int = 1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -312,10 +304,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/nlinear.py b/neuralforecast/models/nlinear.py index 4909ddbd3..3480fc48c 100644 --- a/neuralforecast/models/nlinear.py +++ b/neuralforecast/models/nlinear.py @@ -39,10 +39,6 @@ class NLinear(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -81,10 +77,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -112,10 +104,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/patchtst.py b/neuralforecast/models/patchtst.py index 0b2029fd4..3d92a532d 100644 --- a/neuralforecast/models/patchtst.py +++ b/neuralforecast/models/patchtst.py @@ -836,10 +836,6 @@ class PatchTST(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -895,10 +891,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -926,10 +918,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/rmok.py b/neuralforecast/models/rmok.py index 35db80aca..4061f36c8 100644 --- a/neuralforecast/models/rmok.py +++ b/neuralforecast/models/rmok.py @@ -284,10 +284,6 @@ class RMoK(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -327,10 +323,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -355,10 +347,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/rnn.py b/neuralforecast/models/rnn.py index f5d60f42a..f950c5d99 100644 --- a/neuralforecast/models/rnn.py +++ b/neuralforecast/models/rnn.py @@ -50,10 +50,6 @@ class RNN(BaseRecurrent): `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.
`num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`alias`: str, optional, Custom name of the model.
@@ -95,10 +91,6 @@ def __init__( random_seed=1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -122,10 +114,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/softs.py b/neuralforecast/models/softs.py index cb425200a..6112c3d80 100644 --- a/neuralforecast/models/softs.py +++ b/neuralforecast/models/softs.py @@ -109,10 +109,6 @@ class SOFTS(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -153,10 +149,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -181,10 +173,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/stemgnn.py b/neuralforecast/models/stemgnn.py index 85a014e65..4fa2ccf40 100644 --- a/neuralforecast/models/stemgnn.py +++ b/neuralforecast/models/stemgnn.py @@ -169,10 +169,6 @@ class StemGNN(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -208,10 +204,6 @@ def __init__( random_seed: int = 1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -237,10 +229,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/tcn.py b/neuralforecast/models/tcn.py index fd900512c..fdbd1cdd1 100644 --- a/neuralforecast/models/tcn.py +++ b/neuralforecast/models/tcn.py @@ -47,10 +47,6 @@ class TCN(BaseRecurrent): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -89,10 +85,6 @@ def __init__( random_seed: int = 1, num_workers_loader=0, drop_last_loader=False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -116,10 +108,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/tft.py b/neuralforecast/models/tft.py index f96d5646b..faadec9d5 100644 --- a/neuralforecast/models/tft.py +++ b/neuralforecast/models/tft.py @@ -457,10 +457,6 @@ class TFT(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -505,10 +501,6 @@ def __init__( num_workers_loader=0, drop_last_loader=False, random_seed: int = 1, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -537,10 +529,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/tide.py b/neuralforecast/models/tide.py index ec98c2b13..257972570 100644 --- a/neuralforecast/models/tide.py +++ b/neuralforecast/models/tide.py @@ -81,10 +81,6 @@ class TiDE(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -132,10 +128,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -165,10 +157,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/timellm.py b/neuralforecast/models/timellm.py index aa9276f72..93bd52c84 100644 --- a/neuralforecast/models/timellm.py +++ b/neuralforecast/models/timellm.py @@ -214,10 +214,6 @@ class TimeLLM(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -272,10 +268,6 @@ def __init__( num_workers_loader: int = 0, drop_last_loader: bool = False, random_seed: int = 1, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -302,10 +294,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/timemixer.py b/neuralforecast/models/timemixer.py index 5585539bd..57e081ea5 100644 --- a/neuralforecast/models/timemixer.py +++ b/neuralforecast/models/timemixer.py @@ -285,10 +285,6 @@ class TimeMixer(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -336,10 +332,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -364,10 +356,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) diff --git a/neuralforecast/models/timesnet.py b/neuralforecast/models/timesnet.py index aab548382..87ed9ca56 100644 --- a/neuralforecast/models/timesnet.py +++ b/neuralforecast/models/timesnet.py @@ -182,12 +182,6 @@ class TimesNet(BaseWindows): Workers to be used by `TimeSeriesDataLoader`. drop_last_loader : bool (default=False) If True `TimeSeriesDataLoader` drops last non-full batch. - `optimizer`: Subclass of 'torch.optim.Optimizer', optional (default=None) - User specified optimizer instead of the default choice (Adam). - `optimizer_kwargs`: dict, optional (defualt=None) - List of parameters used by the user specified `optimizer`. - `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional (default=None)
    List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
**trainer_kwargs @@ -235,10 +229,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -266,10 +256,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/tsmixer.py b/neuralforecast/models/tsmixer.py index 0d68e1e4c..23a3e4b99 100644 --- a/neuralforecast/models/tsmixer.py +++ b/neuralforecast/models/tsmixer.py @@ -160,10 +160,6 @@ class TSMixer(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -203,10 +199,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -232,10 +224,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/tsmixerx.py b/neuralforecast/models/tsmixerx.py index 24897d442..b8fed092f 100644 --- a/neuralforecast/models/tsmixerx.py +++ b/neuralforecast/models/tsmixerx.py @@ -188,10 +188,6 @@ class TSMixerx(BaseMultivariate): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -231,10 +227,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs ): @@ -260,10 +252,6 @@ def __init__( random_seed=random_seed, num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs ) diff --git a/neuralforecast/models/vanillatransformer.py b/neuralforecast/models/vanillatransformer.py index 69fcc9c4d..c41eec20b 100644 --- a/neuralforecast/models/vanillatransformer.py +++ b/neuralforecast/models/vanillatransformer.py @@ -117,10 +117,6 @@ class VanillaTransformer(BaseWindows): `num_workers_loader`: int=os.cpu_count(), workers to be used by `TimeSeriesDataLoader`.
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
- `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).
- `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.
- `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).
- `lr_scheduler_kwargs`: dict, optional, list of parameters used by the user specified `lr_scheduler`.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
@@ -166,10 +162,6 @@ def __init__( random_seed: int = 1, num_workers_loader: int = 0, drop_last_loader: bool = False, - optimizer=None, - optimizer_kwargs=None, - lr_scheduler=None, - lr_scheduler_kwargs=None, dataloader_kwargs=None, **trainer_kwargs, ): @@ -196,10 +188,6 @@ def __init__( num_workers_loader=num_workers_loader, drop_last_loader=drop_last_loader, random_seed=random_seed, - optimizer=optimizer, - optimizer_kwargs=optimizer_kwargs, - lr_scheduler=lr_scheduler, - lr_scheduler_kwargs=lr_scheduler_kwargs, dataloader_kwargs=dataloader_kwargs, **trainer_kwargs, ) From eb17a4d877172f7cc637fd79c0362a42f3385226 Mon Sep 17 00:00:00 2001 From: t-minus Date: Wed, 11 Dec 2024 18:30:35 +0000 Subject: [PATCH 5/8] Fix test --- nbs/core.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nbs/core.ipynb b/nbs/core.ipynb index 3a83b52a8..0f34d9ad6 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -3172,12 +3172,12 @@ " mean = default_optimizer_predict.loc[:, nf_model.__name__].mean()\n", "\n", " # using a customized optimizer\n", + " models2 = [nf_model(**params)]\n", " optimizer = torch.optim.Adadelta(params=models2[0].parameters(), rho=0.75)\n", " scheduler=torch.optim.lr_scheduler.StepLR(\n", " optimizer=optimizer, step_size=10e7, gamma=0.5\n", " )\n", "\n", - " models2 = [nf_model(**params)]\n", " models2[0].set_configure_optimizers(\n", " optimizer=optimizer,\n", " scheduler=scheduler,\n", From f5a4f6285c3c36e7fb00b21bb3ecec42cde70f1f Mon Sep 17 00:00:00 2001 From: t-minus Date: Wed, 18 Dec 2024 22:23:53 +0000 Subject: [PATCH 6/8] Correction to the path in contributing.md note --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d66c8d1e..656db5861 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -69,7 +69,7 @@ nbdev_export If you're working on the local interface you can just use `nbdev_test --n_workers 1 --do_print --timing`. ### Cleaning notebooks -Since the notebooks output cells can vary from run to run (even if they produce the same outputs) the notebooks are cleaned before committing them. Please make sure to run `nbdev_clean --clear_all` before committing your changes. If you clean the library's notebooks with this command please backtrack the changes you make to the example notebooks `git checkout nbs/examples`, unless you intend to change the examples. +Since the notebooks output cells can vary from run to run (even if they produce the same outputs) the notebooks are cleaned before committing them. Please make sure to run `nbdev_clean --clear_all` before committing your changes. If you clean the library's notebooks with this command please backtrack the changes you make to the example notebooks `git checkout nbs/docs`, unless you intend to change the examples. ## Do you want to contribute to the documentation? @@ -78,6 +78,6 @@ Since the notebooks output cells can vary from run to run (even if they produce 1. Find the relevant notebook. 2. Make your changes. 3. Run all cells. - 4. If you are modifying library notebooks (not in `nbs/examples`), clean all outputs using `Edit > Clear All Outputs`. + 4. If you are modifying library notebooks (not in `nbs/docs`), clean all outputs using `Edit > Clear All Outputs`. 5. Run `nbdev_preview`. 6. Clean the notebook metadata using `nbdev_clean`. 
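For orientation, the model diffs above all drop the per-model optimizer and scheduler constructor arguments. Below is a minimal sketch of what those arguments looked like before this series, assuming the pre-change signatures described in the docstrings being removed; AdamW, ConstantLR, and the kwargs values are arbitrary illustrations, not project defaults.

import torch
from neuralforecast import NeuralForecast
from neuralforecast.models import NHITS

# Pre-change API: optimizer/scheduler *classes* plus kwargs dicts were passed per model;
# the base class instantiated them inside configure_optimizers().
model = NHITS(
    h=12,
    input_size=24,
    max_steps=1,
    optimizer=torch.optim.AdamW,                       # subclass of torch.optim.Optimizer
    optimizer_kwargs={"weight_decay": 1e-4},           # extra kwargs for the optimizer
    lr_scheduler=torch.optim.lr_scheduler.ConstantLR,  # subclass of torch.optim.lr_scheduler.LRScheduler
    lr_scheduler_kwargs={"factor": 0.5},               # extra kwargs for the scheduler
)
nf = NeuralForecast(models=[model], freq="M")

The remaining patch in this series replaces this per-argument plumbing with a single callable, as shown in the usage sketch after the next patch.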
From 3bab7c000dac78189de369408c64fcf505df192f Mon Sep 17 00:00:00 2001 From: t-minus Date: Sun, 22 Dec 2024 02:29:22 +0000 Subject: [PATCH 7/8] Review: Allow users to specifify the configure_optimizers() for lightning by passing the specified function call --- nbs/common.base_model.ipynb | 50 +------- nbs/common.base_multivariate.ipynb | 4 +- nbs/common.base_recurrent.ipynb | 4 +- nbs/common.base_windows.ipynb | 4 +- nbs/core.ipynb | 133 ++++++-------------- neuralforecast/common/_base_model.py | 52 +------- neuralforecast/common/_base_multivariate.py | 2 + neuralforecast/common/_base_recurrent.py | 2 + neuralforecast/common/_base_windows.py | 2 + 9 files changed, 65 insertions(+), 188 deletions(-) diff --git a/nbs/common.base_model.ipynb b/nbs/common.base_model.ipynb index fc65b239c..4ee3ff9df 100644 --- a/nbs/common.base_model.ipynb +++ b/nbs/common.base_model.ipynb @@ -88,7 +88,7 @@ " kaiming_normal = nn.init.kaiming_normal_\n", " xavier_uniform = nn.init.xavier_uniform_\n", " xavier_normal = nn.init.xavier_normal_\n", - " \n", + " \n", " nn.init.kaiming_uniform_ = noop\n", " nn.init.kaiming_normal_ = noop\n", " nn.init.xavier_uniform_ = noop\n", @@ -125,6 +125,7 @@ " stat_exog_list,\n", " max_steps,\n", " early_stop_patience_steps,\n", + " config_optimizers=None,\n", " **trainer_kwargs,\n", " ):\n", " super().__init__()\n", @@ -145,8 +146,8 @@ " self.train_trajectories = []\n", " self.valid_trajectories = []\n", "\n", - " # customized by set_configure_optimizers()\n", - " self.config_optimizers = None\n", + " # function has the same signature as LightningModule's configure_optimizer\n", + " self.config_optimizers = config_optimizers\n", "\n", " # Variables\n", " self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else []\n", @@ -386,7 +387,7 @@ " def configure_optimizers(self):\n", " if self.config_optimizers is not None:\n", " # return the customized optimizer settings if specified\n", - " return self.config_optimizers\n", + " return self.config_optimizers(self)\n", " \n", " # default choice\n", " optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n", @@ -399,47 +400,6 @@ " }\n", " return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler}\n", "\n", - " def set_configure_optimizers(\n", - " self, \n", - " optimizer=None,\n", - " scheduler=None,\n", - " interval='step',\n", - " frequency=1,\n", - " monitor='val_loss',\n", - " strict=True,\n", - " name=None\n", - " ):\n", - " \"\"\"Helper function to customize the lr_scheduler_config as detailed in \n", - " https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#configure-optimizers\n", - "\n", - " Calling set_configure_optimizers() with valid `optimizer`, `scheduler` shall modify the returned \n", - " dictionary of key='optimizer', key='lr_scheduler' in configure_optimizers().\n", - " Note that the default choice of `interval` in set_configure_optiizers() is 'step',\n", - " which differs from the choice of 'epoch' used in lightning_module. 
\n", - " \"\"\"\n", - " lr_scheduler_config = {\n", - " 'interval': interval,\n", - " 'frequency': frequency,\n", - " 'monitor': monitor,\n", - " 'strict': strict,\n", - " 'name': name,\n", - " }\n", - "\n", - " if scheduler is not None and optimizer is not None:\n", - " if not isinstance(scheduler, torch.optim.lr_scheduler.LRScheduler):\n", - " raise TypeError(\"scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler\")\n", - " if not isinstance(optimizer, torch.optim.Optimizer):\n", - " raise TypeError(\"optimizer is not a valid instance of torch.optim.Optimizer\") \n", - " \n", - " lr_scheduler_config[\"scheduler\"] = scheduler\n", - " self.config_optimizers = {\n", - " 'optimizer': optimizer,\n", - " 'lr_scheduler': lr_scheduler_config,\n", - " }\n", - " else:\n", - " # falls back to default option as specified in configure_optimizers()\n", - " self.config_optimizers = None\n", - "\n", " def get_test_size(self):\n", " return self.test_size\n", "\n", diff --git a/nbs/common.base_multivariate.ipynb b/nbs/common.base_multivariate.ipynb index 3e072fca4..f642ab7c9 100644 --- a/nbs/common.base_multivariate.ipynb +++ b/nbs/common.base_multivariate.ipynb @@ -105,16 +105,18 @@ " random_seed=1, \n", " alias=None,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", " loss=loss,\n", - " valid_loss=valid_loss, \n", + " valid_loss=valid_loss,\n", " futr_exog_list=futr_exog_list,\n", " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", " max_steps=max_steps,\n", " early_stop_patience_steps=early_stop_patience_steps,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs,\n", " )\n", "\n", diff --git a/nbs/common.base_recurrent.ipynb b/nbs/common.base_recurrent.ipynb index c03e1cfeb..d4d5432b4 100644 --- a/nbs/common.base_recurrent.ipynb +++ b/nbs/common.base_recurrent.ipynb @@ -111,6 +111,7 @@ " random_seed=1, \n", " alias=None,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", @@ -120,7 +121,8 @@ " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", " max_steps=max_steps,\n", - " early_stop_patience_steps=early_stop_patience_steps, \n", + " early_stop_patience_steps=early_stop_patience_steps,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs,\n", " )\n", "\n", diff --git a/nbs/common.base_windows.ipynb b/nbs/common.base_windows.ipynb index a2ded93af..01a4e749f 100644 --- a/nbs/common.base_windows.ipynb +++ b/nbs/common.base_windows.ipynb @@ -115,6 +115,7 @@ " random_seed=1,\n", " alias=None,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super().__init__(\n", " random_seed=random_seed,\n", @@ -124,7 +125,8 @@ " hist_exog_list=hist_exog_list,\n", " stat_exog_list=stat_exog_list,\n", " max_steps=max_steps,\n", - " early_stop_patience_steps=early_stop_patience_steps, \n", + " early_stop_patience_steps=early_stop_patience_steps,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs,\n", " )\n", "\n", diff --git a/nbs/core.ipynb b/nbs/core.ipynb index bd3c5d83b..3c7f9535c 100644 --- a/nbs/core.ipynb +++ b/nbs/core.ipynb @@ -3054,6 +3054,21 @@ "# test customized optimizer behavior such that the user defined optimizer result should differ from default\n", "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, 
BaseMultivariate\n", "\n", + "def custom_optimizer(base_model):\n", + " optimizer = torch.optim.Adadelta(params=base_model.parameters(), rho=0.75)\n", + " scheduler=torch.optim.lr_scheduler.StepLR(\n", + " optimizer=optimizer, step_size=10e7, gamma=0.5\n", + " )\n", + " scheduler_config = {\n", + " 'scheduler': scheduler,\n", + " 'interval': 'step',\n", + " 'frequency': 1,\n", + " 'monitor': 'val_loss',\n", + " 'strict': True,\n", + " 'name': None,\n", + " }\n", + " return {'optimizer': optimizer, 'lr_scheduler': scheduler_config}\n", + "\n", "for nf_model in [NHITS, RNN, StemGNN]:\n", " # default optimizer is based on Adam\n", " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 1}\n", @@ -3065,18 +3080,9 @@ " default_optimizer_predict = nf.predict()\n", " mean = default_optimizer_predict.loc[:, nf_model.__name__].mean()\n", "\n", - " # using a customized optimizer\n", + " # employ custom optimizer\n", + " params.update({'config_optimizers': custom_optimizer})\n", " models2 = [nf_model(**params)]\n", - " optimizer = torch.optim.Adadelta(params=models2[0].parameters(), rho=0.75)\n", - " scheduler=torch.optim.lr_scheduler.StepLR(\n", - " optimizer=optimizer, step_size=10e7, gamma=0.5\n", - " )\n", - "\n", - " models2[0].set_configure_optimizers(\n", - " optimizer=optimizer,\n", - " scheduler=scheduler,\n", - " )\n", - "\n", " nf2 = NeuralForecast(models=models2, freq='M')\n", " nf2.fit(AirPassengersPanel_train)\n", " customized_optimizer_predict = nf2.predict()\n", @@ -3085,50 +3091,6 @@ " assert mean2 != mean" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "3db3fe1e", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if the user-defined optimizer is not a subclass of torch.optim.optimizer, failed with exception\n", - "# tests cover different types of base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - "\n", - "for model_name in [NHITS, RNN, StemGNN]:\n", - " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 10}\n", - " if nf_model.__name__ == \"StemGNN\":\n", - " params.update({\"n_series\": 2})\n", - "\n", - " model = model_name(**params) \n", - " optimizer = torch.nn.Module()\n", - " scheduler = torch.optim.lr_scheduler.StepLR(\n", - " optimizer=torch.optim.Adam(model.parameters()), step_size=10e7, gamma=0.5\n", - " ) \n", - " test_fail(lambda: model.set_configure_optimizers(optimizer=optimizer, scheduler=scheduler), contains=\"optimizer is not a valid instance of torch.optim.Optimizer\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "54c7b5e2", - "metadata": {}, - "outputs": [], - "source": [ - "#| hide\n", - "# test that if the user-defined scheduler is not a subclass of torch.optim.lr_scheduler, failed with exception\n", - "# tests cover different types of base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", - "\n", - "for model_name in [NHITS, RNN, StemGNN]:\n", - " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 10}\n", - " if nf_model.__name__ == \"StemGNN\":\n", - " params.update({\"n_series\": 2})\n", - " model = model_name(**params)\n", - " optimizer = torch.optim.Adam(model.parameters())\n", - " test_fail(lambda: model.set_configure_optimizers(optimizer=optimizer, scheduler=torch.nn.Module), contains=\"scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -3242,8 +3204,27 @@ "source": [ "#| hide\n", "# test customized lr_scheduler behavior such that the user 
defined lr_scheduler result should differ from default\n", + "# we have full control on the optimization behavior such that ReduceLROnPlateau can be supported\n", + "# by passing the monitor parameter as well\n", "# tests consider models implemented using different base classes such as BaseWindows, BaseRecurrent, BaseMultivariate\n", "\n", + "def custom_optimizer(base_model):\n", + " optimizer = torch.optim.Adadelta(params=base_model.parameters(), rho=0.75)\n", + "\n", + " # test ReduceLROnPlateau\n", + " scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n", + " optimizer, mode='min', factor=0.5, patience=2,\n", + " ) \n", + " scheduler_config = {\n", + " 'scheduler': scheduler,\n", + " 'interval': 'step',\n", + " 'frequency': 1,\n", + " 'monitor': 'train_loss', # note that train_loss is used instead val_loss\n", + " 'strict': True,\n", + " 'name': None,\n", + " }\n", + " return {'optimizer': optimizer, 'lr_scheduler': scheduler_config}\n", + "\n", "for nf_model in [NHITS, RNN, StemGNN]:\n", " params = {\"h\": 12, \"input_size\": 24, \"max_steps\": 2}\n", " if nf_model.__name__ == \"StemGNN\":\n", @@ -3254,46 +3235,14 @@ " default_predict = nf.predict()\n", " mean = default_predict.loc[:, nf_model.__name__].mean()\n", "\n", - " # calling set_configure_optimizers() shall modify the default behavior of configure_optimizers()\n", - " optimizer = torch.optim.Adadelta(params=models[0].parameters(), rho=0.45)\n", - " scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer=optimizer, factor=0.78)\n", - " models[0].set_configure_optimizers(\n", - " optimizer=optimizer,\n", - " scheduler=scheduler,\n", - " )\n", - " nf2 = NeuralForecast(models=models, freq='M')\n", + " # employ custom optimizer\n", + " params.update({'config_optimizers': custom_optimizer})\n", + " models2 = [nf_model(**params)]\n", + " nf2 = NeuralForecast(models=models2, freq='M')\n", " nf2.fit(AirPassengersPanel_train)\n", " customized_predict = nf2.predict()\n", " mean2 = customized_predict.loc[:, nf_model.__name__].mean()\n", - " assert mean2 != mean\n", - "\n", - " # test that frequency configured has effect on optimization behavior\n", - " models[0].set_configure_optimizers(\n", - " optimizer=optimizer,\n", - " scheduler=scheduler,\n", - " frequency=2,\n", - " )\n", - " nf3 = NeuralForecast(models=models, freq='M')\n", - " nf3.fit(AirPassengersPanel_train)\n", - " customized_predict3 = nf3.predict()\n", - " mean3 = customized_predict3.loc[:, nf_model.__name__].mean()\n", - " assert mean3 != mean\n", - "\n", - " # test ReduceLROnPlateau\n", - " scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n", - " optimizer, mode='min', factor=0.5, patience=2,\n", - " )\n", - " \n", - " models[0].set_configure_optimizers(\n", - " optimizer=optimizer,\n", - " scheduler=scheduler,\n", - " monitor=\"train_loss\",\n", - " )\n", - " nf4 = NeuralForecast(models=models, freq='M')\n", - " nf4.fit(AirPassengersPanel_train)\n", - " customized_predict4 = nf4.predict()\n", - " mean4 = customized_predict4.loc[:, nf_model.__name__].mean()\n", - " assert mean4 != mean \n" + " assert mean2 != mean\n" ] } ], diff --git a/neuralforecast/common/_base_model.py b/neuralforecast/common/_base_model.py index d055f80e7..bb001e2b4 100644 --- a/neuralforecast/common/_base_model.py +++ b/neuralforecast/common/_base_model.py @@ -76,6 +76,7 @@ def __init__( stat_exog_list, max_steps, early_stop_patience_steps, + config_optimizers=None, **trainer_kwargs, ): super().__init__() @@ -96,8 +97,8 @@ def __init__( self.train_trajectories = [] 
self.valid_trajectories = [] - # customized by set_configure_optimizers() - self.config_optimizers = None + # function has the same signature as LightningModule's configure_optimizer + self.config_optimizers = config_optimizers # Variables self.futr_exog_list = list(futr_exog_list) if futr_exog_list is not None else [] @@ -355,7 +356,7 @@ def on_fit_start(self): def configure_optimizers(self): if self.config_optimizers is not None: # return the customized optimizer settings if specified - return self.config_optimizers + return self.config_optimizers(self) # default choice optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate) @@ -368,51 +369,6 @@ def configure_optimizers(self): } return {"optimizer": optimizer, "lr_scheduler": scheduler} - def set_configure_optimizers( - self, - optimizer=None, - scheduler=None, - interval="step", - frequency=1, - monitor="val_loss", - strict=True, - name=None, - ): - """Helper function to customize the lr_scheduler_config as detailed in - https://lightning.ai/docs/pytorch/stable/common/lightning_module.html#configure-optimizers - - Calling set_configure_optimizers() with valid `optimizer`, `scheduler` shall modify the returned - dictionary of key='optimizer', key='lr_scheduler' in configure_optimizers(). - Note that the default choice of `interval` in set_configure_optiizers() is 'step', - which differs from the choice of 'epoch' used in lightning_module. - """ - lr_scheduler_config = { - "interval": interval, - "frequency": frequency, - "monitor": monitor, - "strict": strict, - "name": name, - } - - if scheduler is not None and optimizer is not None: - if not isinstance(scheduler, torch.optim.lr_scheduler.LRScheduler): - raise TypeError( - "scheduler is not a valid instance of torch.optim.lr_scheduler.LRScheduler" - ) - if not isinstance(optimizer, torch.optim.Optimizer): - raise TypeError( - "optimizer is not a valid instance of torch.optim.Optimizer" - ) - - lr_scheduler_config["scheduler"] = scheduler - self.config_optimizers = { - "optimizer": optimizer, - "lr_scheduler": lr_scheduler_config, - } - else: - # falls back to default option as specified in configure_optimizers() - self.config_optimizers = None - def get_test_size(self): return self.test_size diff --git a/neuralforecast/common/_base_multivariate.py b/neuralforecast/common/_base_multivariate.py index 0396b38ca..c858ee8ca 100644 --- a/neuralforecast/common/_base_multivariate.py +++ b/neuralforecast/common/_base_multivariate.py @@ -50,6 +50,7 @@ def __init__( random_seed=1, alias=None, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super().__init__( @@ -61,6 +62,7 @@ def __init__( stat_exog_list=stat_exog_list, max_steps=max_steps, early_stop_patience_steps=early_stop_patience_steps, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/common/_base_recurrent.py b/neuralforecast/common/_base_recurrent.py index fd8eb5a4e..808bca62c 100644 --- a/neuralforecast/common/_base_recurrent.py +++ b/neuralforecast/common/_base_recurrent.py @@ -50,6 +50,7 @@ def __init__( random_seed=1, alias=None, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super().__init__( @@ -61,6 +62,7 @@ def __init__( stat_exog_list=stat_exog_list, max_steps=max_steps, early_stop_patience_steps=early_stop_patience_steps, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/common/_base_windows.py b/neuralforecast/common/_base_windows.py index f0f3ce3e1..4046ebca3 100644 --- 
a/neuralforecast/common/_base_windows.py +++ b/neuralforecast/common/_base_windows.py @@ -53,6 +53,7 @@ def __init__( random_seed=1, alias=None, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super().__init__( @@ -64,6 +65,7 @@ def __init__( stat_exog_list=stat_exog_list, max_steps=max_steps, early_stop_patience_steps=early_stop_patience_steps, + config_optimizers=config_optimizers, **trainer_kwargs, ) From c5f20a17cf0aa1b7a42164486d2a83e5b0561bb1 Mon Sep 17 00:00:00 2001 From: t-minus Date: Sun, 22 Dec 2024 10:57:49 +0000 Subject: [PATCH 8/8] Add arguments and doc string --- nbs/models.autoformer.ipynb | 7 ++++++- nbs/models.bitcn.ipynb | 5 +++++ nbs/models.deepar.ipynb | 5 +++++ nbs/models.deepnpts.ipynb | 5 +++++ nbs/models.dilated_rnn.ipynb | 5 +++++ nbs/models.dlinear.ipynb | 5 +++++ nbs/models.fedformer.ipynb | 7 ++++++- nbs/models.gru.ipynb | 5 +++++ nbs/models.informer.ipynb | 5 +++++ nbs/models.itransformer.ipynb | 7 ++++++- nbs/models.kan.ipynb | 5 +++++ nbs/models.lstm.ipynb | 5 +++++ nbs/models.mlp.ipynb | 5 +++++ nbs/models.mlpmultivariate.ipynb | 5 +++++ nbs/models.nbeats.ipynb | 5 +++++ nbs/models.nbeatsx.ipynb | 5 +++++ nbs/models.nhits.ipynb | 5 +++++ nbs/models.nlinear.ipynb | 5 +++++ nbs/models.patchtst.ipynb | 5 +++++ nbs/models.rmok.ipynb | 7 ++++++- nbs/models.rnn.ipynb | 8 ++++++-- nbs/models.softs.ipynb | 7 ++++++- nbs/models.stemgnn.ipynb | 5 +++++ nbs/models.tcn.ipynb | 7 ++++++- nbs/models.tft.ipynb | 5 +++++ nbs/models.tide.ipynb | 5 +++++ nbs/models.timellm.ipynb | 5 +++++ nbs/models.timemixer.ipynb | 7 ++++++- nbs/models.timesnet.ipynb | 7 ++++++- nbs/models.tsmixer.ipynb | 5 +++++ nbs/models.tsmixerx.ipynb | 5 +++++ nbs/models.vanillatransformer.ipynb | 5 +++++ neuralforecast/models/autoformer.py | 7 ++++++- neuralforecast/models/bitcn.py | 5 +++++ neuralforecast/models/deepar.py | 5 +++++ neuralforecast/models/deepnpts.py | 5 +++++ neuralforecast/models/dilated_rnn.py | 5 +++++ neuralforecast/models/dlinear.py | 5 +++++ neuralforecast/models/fedformer.py | 5 +++++ neuralforecast/models/gru.py | 5 +++++ neuralforecast/models/informer.py | 5 +++++ neuralforecast/models/itransformer.py | 5 +++++ neuralforecast/models/kan.py | 5 +++++ neuralforecast/models/lstm.py | 5 +++++ neuralforecast/models/mlp.py | 5 +++++ neuralforecast/models/mlpmultivariate.py | 5 +++++ neuralforecast/models/nbeats.py | 5 +++++ neuralforecast/models/nbeatsx.py | 5 +++++ neuralforecast/models/nhits.py | 5 +++++ neuralforecast/models/nlinear.py | 5 +++++ neuralforecast/models/patchtst.py | 5 +++++ neuralforecast/models/rmok.py | 5 +++++ neuralforecast/models/rnn.py | 6 +++++- neuralforecast/models/softs.py | 5 +++++ neuralforecast/models/stemgnn.py | 5 +++++ neuralforecast/models/tcn.py | 5 +++++ neuralforecast/models/tft.py | 5 +++++ neuralforecast/models/tide.py | 5 +++++ neuralforecast/models/timellm.py | 5 +++++ neuralforecast/models/timemixer.py | 5 +++++ neuralforecast/models/timesnet.py | 5 +++++ neuralforecast/models/tsmixer.py | 5 +++++ neuralforecast/models/tsmixerx.py | 5 +++++ neuralforecast/models/vanillatransformer.py | 5 +++++ 64 files changed, 330 insertions(+), 12 deletions(-) diff --git a/nbs/models.autoformer.ipynb b/nbs/models.autoformer.ipynb index ef74fb2e3..872a7b28b 100644 --- a/nbs/models.autoformer.ipynb +++ b/nbs/models.autoformer.ipynb @@ -458,7 +458,10 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", - " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", + " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", "\t- [Wu, Haixu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. \"Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting\"](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)
\n", @@ -503,6 +506,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(Autoformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -527,6 +531,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.bitcn.ipynb b/nbs/models.bitcn.ipynb index 84bb73b08..5723648ad 100644 --- a/nbs/models.bitcn.ipynb +++ b/nbs/models.bitcn.ipynb @@ -178,6 +178,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References**
\n", @@ -216,6 +219,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(BiTCN, self).__init__(\n", " h=h,\n", @@ -241,6 +245,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.deepar.ipynb b/nbs/models.deepar.ipynb index feb9b272e..350a21ca4 100644 --- a/nbs/models.deepar.ipynb +++ b/nbs/models.deepar.ipynb @@ -183,6 +183,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References**
\n", @@ -226,6 +229,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", "\n", " if exclude_insample_y:\n", @@ -264,6 +268,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " self.horizon_backup = self.h # Used because h=0 during training\n", diff --git a/nbs/models.deepnpts.ipynb b/nbs/models.deepnpts.ipynb index f53982883..1a0704c00 100644 --- a/nbs/models.deepnpts.ipynb +++ b/nbs/models.deepnpts.ipynb @@ -121,6 +121,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References**
\n", @@ -161,6 +164,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " if exclude_insample_y:\n", @@ -196,6 +200,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " self.h = h\n", diff --git a/nbs/models.dilated_rnn.ipynb b/nbs/models.dilated_rnn.ipynb index e2960e4b0..b8f5ea089 100644 --- a/nbs/models.dilated_rnn.ipynb +++ b/nbs/models.dilated_rnn.ipynb @@ -390,6 +390,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -425,6 +428,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(DilatedRNN, self).__init__(\n", " h=h,\n", @@ -446,6 +450,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.dlinear.ipynb b/nbs/models.dlinear.ipynb index 0cccb748d..efe0878e5 100644 --- a/nbs/models.dlinear.ipynb +++ b/nbs/models.dlinear.ipynb @@ -162,6 +162,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", "\t*References*
\n", @@ -198,6 +201,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs=None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(DLinear, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -222,6 +226,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.fedformer.ipynb b/nbs/models.fedformer.ipynb index 6b58bcaa0..7558530ed 100644 --- a/nbs/models.fedformer.ipynb +++ b/nbs/models.fedformer.ipynb @@ -451,6 +451,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " \"\"\"\n", @@ -495,6 +498,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(FEDformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -517,7 +521,8 @@ " scaler_type=scaler_type,\n", " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", - " dataloader_kwargs=dataloader_kwargs, \n", + " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Architecture\n", " self.label_len = int(np.ceil(input_size * decoder_input_size_multiplier))\n", diff --git a/nbs/models.gru.ipynb b/nbs/models.gru.ipynb index 5d979bc08..eebe23621 100644 --- a/nbs/models.gru.ipynb +++ b/nbs/models.gru.ipynb @@ -134,6 +134,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -170,6 +173,7 @@ " random_seed=1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(GRU, self).__init__(\n", " h=h,\n", @@ -191,6 +195,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.informer.ipynb b/nbs/models.informer.ipynb index 51a765bc0..bde9492f8 100644 --- a/nbs/models.informer.ipynb +++ b/nbs/models.informer.ipynb @@ -306,6 +306,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", "\t*References*
\n", @@ -351,6 +354,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(Informer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -375,6 +379,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.itransformer.ipynb b/nbs/models.itransformer.ipynb index 7ee4e7ea0..5ea9736d9 100644 --- a/nbs/models.itransformer.ipynb +++ b/nbs/models.itransformer.ipynb @@ -228,6 +228,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \n", " **References**
\n", @@ -267,7 +270,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers=None, \n", " **trainer_kwargs):\n", " \n", " super(iTransformer, self).__init__(h=h,\n", @@ -289,6 +293,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.enc_in = n_series\n", diff --git a/nbs/models.kan.ipynb b/nbs/models.kan.ipynb index 50a75ddfc..cdf08b7a3 100644 --- a/nbs/models.kan.ipynb +++ b/nbs/models.kan.ipynb @@ -362,6 +362,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References**
\n", @@ -408,6 +411,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " # Inherit BaseWindows class\n", @@ -434,6 +438,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs = dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.lstm.ipynb b/nbs/models.lstm.ipynb index 0ac6314e7..af9f77f9f 100644 --- a/nbs/models.lstm.ipynb +++ b/nbs/models.lstm.ipynb @@ -121,6 +121,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -156,6 +159,7 @@ " random_seed = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(LSTM, self).__init__(\n", " h=h,\n", @@ -177,6 +181,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.mlp.ipynb b/nbs/models.mlp.ipynb index 67063d696..ae24586cd 100644 --- a/nbs/models.mlp.ipynb +++ b/nbs/models.mlp.ipynb @@ -114,6 +114,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -148,6 +151,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -174,6 +178,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.mlpmultivariate.ipynb b/nbs/models.mlpmultivariate.ipynb index b998e4b17..ed2cfefd1 100644 --- a/nbs/models.mlpmultivariate.ipynb +++ b/nbs/models.mlpmultivariate.ipynb @@ -108,6 +108,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -138,6 +141,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -160,6 +164,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nbeats.ipynb b/nbs/models.nbeats.ipynb index 64541fb02..090df8fbf 100644 --- a/nbs/models.nbeats.ipynb +++ b/nbs/models.nbeats.ipynb @@ -270,6 +270,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -310,6 +313,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " # Protect horizon collapsed seasonality and trend NBEATSx-i basis\n", @@ -338,6 +342,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nbeatsx.ipynb b/nbs/models.nbeatsx.ipynb index b0befa8dd..121fb3d73 100644 --- a/nbs/models.nbeatsx.ipynb +++ b/nbs/models.nbeatsx.ipynb @@ -414,6 +414,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -460,6 +463,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs,\n", " ):\n", " # Protect horizon collapsed seasonality and trend NBEATSx-i basis\n", @@ -492,6 +496,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nhits.ipynb b/nbs/models.nhits.ipynb index 79b05b982..d9a6fadf3 100644 --- a/nbs/models.nhits.ipynb +++ b/nbs/models.nhits.ipynb @@ -303,6 +303,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -349,6 +352,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -375,6 +379,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.nlinear.ipynb b/nbs/models.nlinear.ipynb index 30152e9e8..5d1999ec5 100644 --- a/nbs/models.nlinear.ipynb +++ b/nbs/models.nlinear.ipynb @@ -102,6 +102,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", "\t*References*
\n", @@ -137,6 +140,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(NLinear, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -161,6 +165,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.patchtst.ipynb b/nbs/models.patchtst.ipynb index 99d5a5bc6..1c48ac0e8 100644 --- a/nbs/models.patchtst.ipynb +++ b/nbs/models.patchtst.ipynb @@ -662,6 +662,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -714,6 +717,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers= None,\n", " **trainer_kwargs):\n", " super(PatchTST, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -738,6 +742,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs) \n", "\n", " # Enforce correct patch_len, regardless of user input\n", diff --git a/nbs/models.rmok.ipynb b/nbs/models.rmok.ipynb index 86bc7012e..b666cf835 100644 --- a/nbs/models.rmok.ipynb +++ b/nbs/models.rmok.ipynb @@ -359,6 +359,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " Reference
\n", @@ -395,7 +398,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None, \n", " **trainer_kwargs):\n", " \n", " super(RMoK, self).__init__(h=h,\n", @@ -417,6 +421,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.input_size = input_size\n", diff --git a/nbs/models.rnn.ipynb b/nbs/models.rnn.ipynb index 9fefcd584..cfff45dff 100644 --- a/nbs/models.rnn.ipynb +++ b/nbs/models.rnn.ipynb @@ -126,7 +126,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", " `alias`: str, optional, Custom name of the model.
\n", - "\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", - "\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -162,7 +164,8 @@ " scaler_type: str='robust',\n", " random_seed=1,\n", " drop_last_loader=False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(RNN, self).__init__(\n", " h=h,\n", @@ -184,6 +187,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.softs.ipynb b/nbs/models.softs.ipynb index 4cf421c85..2f73995b2 100644 --- a/nbs/models.softs.ipynb +++ b/nbs/models.softs.ipynb @@ -200,6 +200,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \n", " **References**
\n", @@ -237,7 +240,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " super(SOFTS, self).__init__(h=h,\n", @@ -259,6 +263,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.h = h\n", diff --git a/nbs/models.stemgnn.ipynb b/nbs/models.stemgnn.ipynb index e9f077956..1a1edc789 100644 --- a/nbs/models.stemgnn.ipynb +++ b/nbs/models.stemgnn.ipynb @@ -204,6 +204,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -236,6 +239,7 @@ " random_seed: int = 1,\n", " drop_last_loader = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers= None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -258,6 +262,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Quick fix for now, fix the model later.\n", " if n_stacks != 2:\n", diff --git a/nbs/models.tcn.ipynb b/nbs/models.tcn.ipynb index c2c2f3cc6..06cc8c9db 100644 --- a/nbs/models.tcn.ipynb +++ b/nbs/models.tcn.ipynb @@ -126,6 +126,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", " \"\"\"\n", " # Class attributes\n", @@ -160,7 +163,8 @@ " scaler_type: str ='robust',\n", " random_seed: int = 1,\n", " drop_last_loader = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers=None,\n", " **trainer_kwargs):\n", " super(TCN, self).__init__(\n", " h=h,\n", @@ -182,6 +186,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs = dataloader_kwargs,\n", + " config_optimizers = config_optimizers,\n", " **trainer_kwargs\n", " )\n", "\n", diff --git a/nbs/models.tft.ipynb b/nbs/models.tft.ipynb index faaab5529..1a5bd3399 100644 --- a/nbs/models.tft.ipynb +++ b/nbs/models.tft.ipynb @@ -696,6 +696,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -739,6 +742,7 @@ " drop_last_loader=False,\n", " random_seed: int = 1,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs,\n", " ):\n", "\n", @@ -766,6 +770,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs,\n", " )\n", " self.example_length = input_size + h\n", diff --git a/nbs/models.tide.ipynb b/nbs/models.tide.ipynb index 3a586cc11..49a0643bb 100644 --- a/nbs/models.tide.ipynb +++ b/nbs/models.tide.ipynb @@ -167,6 +167,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -211,6 +214,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseWindows class\n", @@ -238,6 +242,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs\n", " ) \n", " self.h = h\n", diff --git a/nbs/models.timellm.ipynb b/nbs/models.timellm.ipynb index 1ba9472e6..b8a17b19f 100755 --- a/nbs/models.timellm.ipynb +++ b/nbs/models.timellm.ipynb @@ -291,6 +291,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References:**
\n", @@ -343,6 +346,7 @@ " drop_last_loader: bool = False,\n", " random_seed: int = 1,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(TimeLLM, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -366,6 +370,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " # Architecture\n", diff --git a/nbs/models.timemixer.ipynb b/nbs/models.timemixer.ipynb index 129a9d099..5ee98d4fd 100644 --- a/nbs/models.timemixer.ipynb +++ b/nbs/models.timemixer.ipynb @@ -360,6 +360,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: , optional, A callable function that implements the optimization behavior as detailed in
\n", + " `config_optimizers`: callable, optional, a function that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept a single argument, an instance of a subclass of NeuralForecast's `BaseModel`, so that it can pass the model's parameters() to the optimizer.
\n", "\n", " **References**
\n", @@ -404,7 +407,8 @@ " scaler_type: str = 'identity',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " \n", " super(TimeMixer, self).__init__(h=h,\n", @@ -426,6 +430,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " \n", " self.label_len = int(np.ceil(input_size * decoder_input_size_multiplier))\n", diff --git a/nbs/models.timesnet.ipynb b/nbs/models.timesnet.ipynb index 1e1a3d371..fadfb1c3a 100644 --- a/nbs/models.timesnet.ipynb +++ b/nbs/models.timesnet.ipynb @@ -263,6 +263,9 @@ " If True `TimeSeriesDataLoader` drops last non-full batch.\n", " `dataloader_kwargs`: dict, optional (default=None)\n", " List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
\n", " **trainer_kwargs\n", " Keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer)\n", "\n", @@ -305,7 +308,8 @@ " scaler_type: str = 'standard',\n", " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", - " dataloader_kwargs = None, \n", + " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(TimesNet, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -330,6 +334,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/nbs/models.tsmixer.ipynb b/nbs/models.tsmixer.ipynb index 2b05bd51b..bef038037 100644 --- a/nbs/models.tsmixer.ipynb +++ b/nbs/models.tsmixer.ipynb @@ -250,6 +250,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -286,6 +289,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultivariate class\n", @@ -308,6 +312,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Reversible InstanceNormalization layer\n", diff --git a/nbs/models.tsmixerx.ipynb b/nbs/models.tsmixerx.ipynb index 7d340cde7..4916129af 100644 --- a/nbs/models.tsmixerx.ipynb +++ b/nbs/models.tsmixerx.ipynb @@ -274,6 +274,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", " **References:**
\n", @@ -310,6 +313,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", "\n", " # Inherit BaseMultvariate class\n", @@ -332,6 +336,7 @@ " random_seed=random_seed,\n", " drop_last_loader=drop_last_loader,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", " # Reversible InstanceNormalization layer\n", " self.revin = revin\n", diff --git a/nbs/models.vanillatransformer.ipynb b/nbs/models.vanillatransformer.ipynb index f1d60f369..78ef923b5 100644 --- a/nbs/models.vanillatransformer.ipynb +++ b/nbs/models.vanillatransformer.ipynb @@ -198,6 +198,9 @@ " `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
\n", " `alias`: str, optional, Custom name of the model.
\n", " `dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
\n", + " `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
\n", + " https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
\n", + " Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
\n", " `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
\n", "\n", "\t*References*
\n", @@ -240,6 +243,7 @@ " random_seed: int = 1,\n", " drop_last_loader: bool = False,\n", " dataloader_kwargs = None,\n", + " config_optimizers = None,\n", " **trainer_kwargs):\n", " super(VanillaTransformer, self).__init__(h=h,\n", " input_size=input_size,\n", @@ -263,6 +267,7 @@ " drop_last_loader=drop_last_loader,\n", " random_seed=random_seed,\n", " dataloader_kwargs=dataloader_kwargs,\n", + " config_optimizers=config_optimizers,\n", " **trainer_kwargs)\n", "\n", " # Architecture\n", diff --git a/neuralforecast/models/autoformer.py b/neuralforecast/models/autoformer.py index 5e97561db..6deeb8005 100644 --- a/neuralforecast/models/autoformer.py +++ b/neuralforecast/models/autoformer.py @@ -442,7 +442,10 @@ class Autoformer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
- `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
+ `**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
- [Wu, Haixu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. "Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting"](https://proceedings.neurips.cc/paper/2021/hash/bcc0d400288793e8bdcd7c19a8ac0c2b-Abstract.html)
@@ -489,6 +492,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(Autoformer, self).__init__( @@ -515,6 +519,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/bitcn.py b/neuralforecast/models/bitcn.py index 856727c0a..a631164c4 100644 --- a/neuralforecast/models/bitcn.py +++ b/neuralforecast/models/bitcn.py @@ -116,6 +116,9 @@ class BiTCN(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -156,6 +159,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(BiTCN, self).__init__( @@ -182,6 +186,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/deepar.py b/neuralforecast/models/deepar.py index 047c53496..eedcc4823 100644 --- a/neuralforecast/models/deepar.py +++ b/neuralforecast/models/deepar.py @@ -87,6 +87,9 @@ class DeepAR(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -134,6 +137,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -178,6 +182,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/deepnpts.py b/neuralforecast/models/deepnpts.py index 2ccde3491..77fd2cc29 100644 --- a/neuralforecast/models/deepnpts.py +++ b/neuralforecast/models/deepnpts.py @@ -49,6 +49,9 @@ class DeepNPTS(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -91,6 +94,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -132,6 +136,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/dilated_rnn.py b/neuralforecast/models/dilated_rnn.py index 296e4de70..c2c5a2996 100644 --- a/neuralforecast/models/dilated_rnn.py +++ b/neuralforecast/models/dilated_rnn.py @@ -317,6 +317,9 @@ class DilatedRNN(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -354,6 +357,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(DilatedRNN, self).__init__( @@ -376,6 +380,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/dlinear.py b/neuralforecast/models/dlinear.py index a43f167c2..8a0c58928 100644 --- a/neuralforecast/models/dlinear.py +++ b/neuralforecast/models/dlinear.py @@ -75,6 +75,9 @@ class DLinear(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -113,6 +116,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(DLinear, self).__init__( @@ -139,6 +143,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/fedformer.py b/neuralforecast/models/fedformer.py index 68990131e..8620d9df1 100644 --- a/neuralforecast/models/fedformer.py +++ b/neuralforecast/models/fedformer.py @@ -440,6 +440,9 @@ class FEDformer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -486,6 +489,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(FEDformer, self).__init__( @@ -511,6 +515,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) # Architecture diff --git a/neuralforecast/models/gru.py b/neuralforecast/models/gru.py index 8e428a196..f969d2ddc 100644 --- a/neuralforecast/models/gru.py +++ b/neuralforecast/models/gru.py @@ -52,6 +52,9 @@ class GRU(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -90,6 +93,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(GRU, self).__init__( @@ -112,6 +116,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/informer.py b/neuralforecast/models/informer.py index aefa8afa2..2865b7a87 100644 --- a/neuralforecast/models/informer.py +++ b/neuralforecast/models/informer.py @@ -226,6 +226,9 @@ class Informer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -273,6 +276,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(Informer, self).__init__( @@ -299,6 +303,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/itransformer.py b/neuralforecast/models/itransformer.py index 35a56f77b..30e337954 100644 --- a/neuralforecast/models/itransformer.py +++ b/neuralforecast/models/itransformer.py @@ -134,6 +134,9 @@ class iTransformer(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -175,6 +178,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -198,6 +202,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/kan.py b/neuralforecast/models/kan.py index db6c0ae07..a9e95012a 100644 --- a/neuralforecast/models/kan.py +++ b/neuralforecast/models/kan.py @@ -284,6 +284,9 @@ class KAN(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -331,6 +334,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -359,6 +363,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/lstm.py b/neuralforecast/models/lstm.py index ff901ca58..eaa671155 100644 --- a/neuralforecast/models/lstm.py +++ b/neuralforecast/models/lstm.py @@ -50,6 +50,9 @@ class LSTM(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -87,6 +90,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(LSTM, self).__init__( @@ -109,6 +113,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/mlp.py b/neuralforecast/models/mlp.py index 48b780b55..c3f5f452d 100644 --- a/neuralforecast/models/mlp.py +++ b/neuralforecast/models/mlp.py @@ -49,6 +49,9 @@ class MLP(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -85,6 +88,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -113,6 +117,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/mlpmultivariate.py b/neuralforecast/models/mlpmultivariate.py index 31225c64d..dcb065058 100644 --- a/neuralforecast/models/mlpmultivariate.py +++ b/neuralforecast/models/mlpmultivariate.py @@ -43,6 +43,9 @@ class MLPMultivariate(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -75,6 +78,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -99,6 +103,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/nbeats.py b/neuralforecast/models/nbeats.py index a2a36822c..cd55307a0 100644 --- a/neuralforecast/models/nbeats.py +++ b/neuralforecast/models/nbeats.py @@ -228,6 +228,9 @@ class NBEATS(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -270,6 +273,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): @@ -300,6 +304,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/nbeatsx.py b/neuralforecast/models/nbeatsx.py index 0baa0c6c0..b08bc06dc 100644 --- a/neuralforecast/models/nbeatsx.py +++ b/neuralforecast/models/nbeatsx.py @@ -315,6 +315,9 @@ class NBEATSx(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -361,6 +364,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): # Protect horizon collapsed seasonality and trend NBEATSx-i basis @@ -394,6 +398,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/nhits.py b/neuralforecast/models/nhits.py index c1e56f5ba..d28936d5b 100644 --- a/neuralforecast/models/nhits.py +++ b/neuralforecast/models/nhits.py @@ -226,6 +226,9 @@ class NHITS(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -274,6 +277,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): @@ -302,6 +306,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/nlinear.py b/neuralforecast/models/nlinear.py index 19e176469..bf633fe04 100644 --- a/neuralforecast/models/nlinear.py +++ b/neuralforecast/models/nlinear.py @@ -39,6 +39,9 @@ class NLinear(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -76,6 +79,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(NLinear, self).__init__( @@ -102,6 +106,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/patchtst.py b/neuralforecast/models/patchtst.py index 314d5620d..95a5d7e7c 100644 --- a/neuralforecast/models/patchtst.py +++ b/neuralforecast/models/patchtst.py @@ -836,6 +836,9 @@ class PatchTST(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -890,6 +893,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(PatchTST, self).__init__( @@ -916,6 +920,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/rmok.py b/neuralforecast/models/rmok.py index 2542b7752..97e179d08 100644 --- a/neuralforecast/models/rmok.py +++ b/neuralforecast/models/rmok.py @@ -284,6 +284,9 @@ class RMoK(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
Reference
@@ -322,6 +325,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -345,6 +349,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/rnn.py b/neuralforecast/models/rnn.py index cb346ed43..8b4865120 100644 --- a/neuralforecast/models/rnn.py +++ b/neuralforecast/models/rnn.py @@ -51,7 +51,9 @@ class RNN(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
`alias`: str, optional, Custom name of the model.
- + `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -90,6 +92,7 @@ def __init__( random_seed=1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(RNN, self).__init__( @@ -112,6 +115,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/softs.py b/neuralforecast/models/softs.py index 3b1aadd8b..1a521974c 100644 --- a/neuralforecast/models/softs.py +++ b/neuralforecast/models/softs.py @@ -109,6 +109,9 @@ class SOFTS(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -148,6 +151,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -171,6 +175,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/stemgnn.py b/neuralforecast/models/stemgnn.py index 3d575accf..f3513e8f6 100644 --- a/neuralforecast/models/stemgnn.py +++ b/neuralforecast/models/stemgnn.py @@ -169,6 +169,9 @@ class StemGNN(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -203,6 +206,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -227,6 +231,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) # Quick fix for now, fix the model later. diff --git a/neuralforecast/models/tcn.py b/neuralforecast/models/tcn.py index 79434c32f..a63f1d381 100644 --- a/neuralforecast/models/tcn.py +++ b/neuralforecast/models/tcn.py @@ -47,6 +47,9 @@ class TCN(BaseRecurrent): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
""" @@ -84,6 +87,7 @@ def __init__( random_seed: int = 1, drop_last_loader=False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(TCN, self).__init__( @@ -106,6 +110,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/tft.py b/neuralforecast/models/tft.py index eaef38021..ba27b59e7 100644 --- a/neuralforecast/models/tft.py +++ b/neuralforecast/models/tft.py @@ -457,6 +457,9 @@ class TFT(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -500,6 +503,7 @@ def __init__( drop_last_loader=False, random_seed: int = 1, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): @@ -527,6 +531,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) self.example_length = input_size + h diff --git a/neuralforecast/models/tide.py b/neuralforecast/models/tide.py index d331d1082..7a8aadaa0 100644 --- a/neuralforecast/models/tide.py +++ b/neuralforecast/models/tide.py @@ -81,6 +81,9 @@ class TiDE(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -127,6 +130,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -155,6 +159,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) self.h = h diff --git a/neuralforecast/models/timellm.py b/neuralforecast/models/timellm.py index aa8d7f075..dbf0869b7 100644 --- a/neuralforecast/models/timellm.py +++ b/neuralforecast/models/timellm.py @@ -214,6 +214,9 @@ class TimeLLM(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -267,6 +270,7 @@ def __init__( drop_last_loader: bool = False, random_seed: int = 1, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(TimeLLM, self).__init__( @@ -292,6 +296,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/timemixer.py b/neuralforecast/models/timemixer.py index 0d5ea5a3d..da8af5aba 100644 --- a/neuralforecast/models/timemixer.py +++ b/neuralforecast/models/timemixer.py @@ -285,6 +285,9 @@ class TimeMixer(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References**
@@ -331,6 +334,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): @@ -354,6 +358,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, ) diff --git a/neuralforecast/models/timesnet.py b/neuralforecast/models/timesnet.py index d854ae526..031fd6994 100644 --- a/neuralforecast/models/timesnet.py +++ b/neuralforecast/models/timesnet.py @@ -182,6 +182,9 @@ class TimesNet(BaseWindows): If True `TimeSeriesDataLoader` drops last non-full batch. `dataloader_kwargs`: dict, optional (default=None) List of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
**trainer_kwargs Keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer) @@ -227,6 +230,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): super(TimesNet, self).__init__( @@ -253,6 +257,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/tsmixer.py b/neuralforecast/models/tsmixer.py index b41b4181f..35542615c 100644 --- a/neuralforecast/models/tsmixer.py +++ b/neuralforecast/models/tsmixer.py @@ -160,6 +160,9 @@ class TSMixer(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -198,6 +201,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -222,6 +226,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) diff --git a/neuralforecast/models/tsmixerx.py b/neuralforecast/models/tsmixerx.py index 8f5101a7f..ef025fd63 100644 --- a/neuralforecast/models/tsmixerx.py +++ b/neuralforecast/models/tsmixerx.py @@ -188,6 +188,9 @@ class TSMixerx(BaseMultivariate): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
**References:**
@@ -226,6 +229,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs ): @@ -250,6 +254,7 @@ def __init__( random_seed=random_seed, drop_last_loader=drop_last_loader, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs ) # Reversible InstanceNormalization layer diff --git a/neuralforecast/models/vanillatransformer.py b/neuralforecast/models/vanillatransformer.py index 1c4645c24..86177a4f0 100644 --- a/neuralforecast/models/vanillatransformer.py +++ b/neuralforecast/models/vanillatransformer.py @@ -117,6 +117,9 @@ class VanillaTransformer(BaseWindows): `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.
`alias`: str, optional, Custom name of the model.
`dataloader_kwargs`: dict, optional, list of parameters passed into the PyTorch Lightning dataloader by the `TimeSeriesDataLoader`.
+ `config_optimizers`: Callable, optional, a callable that implements the optimization behavior as detailed in
+ https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.core.LightningModule.html#lightning.pytorch.core.LightningModule.configure_optimizers
+ Note that the function must accept an argument that is a subclass of NeuralForecast's `BaseModel` to specify the model's parameters() for the optimizer.
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lighning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).
*References*
@@ -161,6 +164,7 @@ def __init__( random_seed: int = 1, drop_last_loader: bool = False, dataloader_kwargs=None, + config_optimizers=None, **trainer_kwargs, ): super(VanillaTransformer, self).__init__( @@ -186,6 +190,7 @@ def __init__( drop_last_loader=drop_last_loader, random_seed=random_seed, dataloader_kwargs=dataloader_kwargs, + config_optimizers=config_optimizers, **trainer_kwargs, )
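A minimal usage sketch of the new `config_optimizers` argument, for reviewers (illustrative only, not part of the patch): it assumes the callable is invoked with the model instance (a subclass of NeuralForecast's `BaseModel`) and returns the same structure that `LightningModule.configure_optimizers` would return. The helper name `my_config_optimizers` and the hyperparameter values are hypothetical.

    import torch
    from neuralforecast import NeuralForecast
    from neuralforecast.models import NHITS

    def my_config_optimizers(model):
        # Build the optimizer from the model's parameters(), as the docstrings describe.
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-4)
        # Optional scheduler, returned in the dict format Lightning expects.
        scheduler = {
            "scheduler": torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5),
            "interval": "step",
            "frequency": 1,
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler}

    model = NHITS(h=12, input_size=24, max_steps=100, config_optimizers=my_config_optimizers)
    nf = NeuralForecast(models=[model], freq="M")
    # nf.fit(df)  # df: long-format dataframe with unique_id, ds, y columns

Passing a plain model-level `optimizer`/`optimizer_kwargs` remains the simpler path; `config_optimizers` is only needed when the full optimizer-plus-scheduler setup must be customized.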