Skip to content

Commit

Permalink
[FEAT] Added NLinear model (#900)
Browse files Browse the repository at this point in the history
* feat: nlinear implementation

* feat: added nlinear default auto config

* chore: add nlinear to core

* docs: updated README.md

* chore: run nbdev_clean

---------

Co-authored-by: Cristian Challu <cristiani.challu@gmail.com>
  • Loading branch information
ggattoni and cchallu authored Mar 1, 2024
1 parent b9d2587 commit e1e4474
Show file tree
Hide file tree
Showing 9 changed files with 666 additions and 15 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
[![All Contributors](https://img.shields.io/badge/all_contributors-11-orange.svg?style=flat-square)](#contributors-)
<!-- ALL-CONTRIBUTORS-BADGE:END -->

**NeuralForecast** offers a large collection of neural forecasting models focusing on their performance, usability, and robustness. The models range from classic networks like `RNN` to the latest transformers: `MLP`, `LSTM`, `GRU`,`RNN`,`TCN`, `DeepAR`, `NBEATS`, `NBEATSx`, `NHITS`, `DLinear`,`TFT`, `Informer`, `AutoFormer`, `FedFormer`, `PatchTST`,`StemGNN`, and `TimesNet`.
**NeuralForecast** offers a large collection of neural forecasting models focusing on their performance, usability, and robustness. The models range from classic networks like `RNN` to the latest transformers: `MLP`, `LSTM`, `GRU`,`RNN`,`TCN`, `DeepAR`, `NBEATS`, `NBEATSx`, `NHITS`, `DLinear`, `NLinear`,`TFT`, `Informer`, `AutoFormer`, `FedFormer`, `PatchTST`,`StemGNN`, and `TimesNet`.
</div>

## Installation
Expand Down
5 changes: 3 additions & 2 deletions nbs/core.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@
"from neuralforecast.tsdataset import TimeSeriesDataset\n",
"from neuralforecast.models import (\n",
" GRU, LSTM, RNN, TCN, DeepAR, DilatedRNN,\n",
" MLP, NHITS, NBEATS, NBEATSx, DLinear,\n",
" MLP, NHITS, NBEATS, NBEATSx, DLinear, NLinear,\n",
" TFT, VanillaTransformer,\n",
" Informer, Autoformer, FEDformer,\n",
" StemGNN, PatchTST, TimesNet\n",
Expand Down Expand Up @@ -205,7 +205,8 @@
"MODEL_FILENAME_DICT = {\n",
" 'autoformer': Autoformer, 'autoautoformer': Autoformer,\n",
" 'deepar': DeepAR, 'autodeepar': DeepAR,\n",
" 'dlinear': DLinear, 'autodlinear': DLinear, \n",
" 'dlinear': DLinear, 'autodlinear': DLinear,\n",
" 'nlinear': NLinear, 'autonlinear': NLinear, \n",
" 'dilatedrnn': DilatedRNN , 'autodilatedrnn': DilatedRNN,\n",
" 'fedformer': FEDformer, 'autofedformer': FEDformer,\n",
" 'gru': GRU, 'autogru': GRU,\n",
Expand Down
101 changes: 101 additions & 0 deletions nbs/models.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
"from neuralforecast.models.nbeatsx import NBEATSx\n",
"from neuralforecast.models.nhits import NHITS\n",
"from neuralforecast.models.dlinear import DLinear\n",
"from neuralforecast.models.nlinear import NLinear\n",
"\n",
"from neuralforecast.models.tft import TFT\n",
"from neuralforecast.models.vanillatransformer import VanillaTransformer\n",
Expand Down Expand Up @@ -1285,6 +1286,106 @@
"assert model.config(MockTrial())['h'] == 12"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "180a5a10",
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"# Hyperparameter-search wrapper for the NLinear model, following the same\n",
"# pattern as the other Auto* classes in this notebook (e.g. AutoDLinear).\n",
"class AutoNLinear(BaseAuto):\n",
"\n",
" # Default Ray Tune search space. 'h' and 'loss' are placeholders filled\n",
" # in by __init__/BaseAuto; 'input_size_multiplier' is expanded into an\n",
" # 'input_size' choice in __init__ and then removed from the config.\n",
" default_config = {\n",
" \"input_size_multiplier\": [1, 2, 3, 4, 5],\n",
" \"h\": None,\n",
" \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n",
" \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n",
" \"max_steps\": tune.quniform(lower=500, upper=1500, q=100),\n",
" \"batch_size\": tune.choice([32, 64, 128, 256]),\n",
" \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n",
" \"loss\": None,\n",
" \"random_seed\": tune.randint(lower=1, upper=20),\n",
" }\n",
"\n",
" def __init__(self,\n",
" h,\n",
" loss=MAE(),\n",
" valid_loss=None,\n",
" config=None, \n",
" search_alg=BasicVariantGenerator(random_state=1),\n",
" num_samples=10,\n",
" refit_with_val=False,\n",
" cpus=cpu_count(),\n",
" gpus=torch.cuda.device_count(),\n",
" verbose=False,\n",
" alias=None,\n",
" backend='ray',\n",
" callbacks=None,\n",
" ):\n",
"\n",
" # Define search space, input/output sizes\n",
" if config is None:\n",
" # Copy so the class-level default_config is never mutated.\n",
" config = self.default_config.copy() \n",
" # Candidate input sizes are multiples of the forecast horizon h.\n",
" config['input_size'] = tune.choice([h*x \\\n",
" for x in self.default_config[\"input_size_multiplier\"]])\n",
" \n",
" # Rolling windows with step_size=1 or step_size=h\n",
" # See `BaseWindows` and `BaseRNN`'s create_windows\n",
" config['step_size'] = tune.choice([1, h])\n",
" del config[\"input_size_multiplier\"]\n",
" # NOTE(review): this conversion appears to run only when config is\n",
" # None — a user-supplied ray-style config with backend='optuna' is\n",
" # passed through unconverted. Confirm this matches the other Auto*\n",
" # models' behavior.\n",
" if backend == 'optuna':\n",
" config = self._ray_config_to_optuna(config) \n",
"\n",
" # Delegate training/tuning machinery to BaseAuto with NLinear as the\n",
" # underlying model class.\n",
" super(AutoNLinear, self).__init__(\n",
" cls_model=NLinear, \n",
" h=h,\n",
" loss=loss,\n",
" valid_loss=valid_loss,\n",
" config=config,\n",
" search_alg=search_alg,\n",
" num_samples=num_samples,\n",
" refit_with_val=refit_with_val,\n",
" cpus=cpus,\n",
" gpus=gpus,\n",
" verbose=verbose,\n",
" alias=alias,\n",
" backend=backend,\n",
" callbacks=callbacks,\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e414458",
"metadata": {},
"outputs": [],
"source": [
"show_doc(AutoNLinear, title_level=3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80759148",
"metadata": {},
"outputs": [],
"source": [
"%%capture\n",
"# Smoke test: tiny budget (2 train steps, 1 tune sample, 1 CPU) so the\n",
"# notebook runs quickly in CI.\n",
"# Use your own config or AutoNLinear.default_config\n",
"config = dict(max_steps=2, val_check_steps=1, input_size=12)\n",
"model = AutoNLinear(h=12, config=config, num_samples=1, cpus=1)\n",
"\n",
"# Fit and predict\n",
"model.fit(dataset=dataset)\n",
"y_hat = model.predict(dataset=dataset)\n",
"\n",
"# Optuna backend: default config is converted to an optuna search space;\n",
"# MockTrial verifies the sampled config keeps the requested horizon h.\n",
"model = AutoNLinear(h=12, config=None, backend='optuna')\n",
"assert model.config(MockTrial())['h'] == 12"
]
},
{
"attachments": {},
"cell_type": "markdown",
Expand Down
Loading

0 comments on commit e1e4474

Please sign in to comment.