diff --git a/.github/workflows/style_type_checks.yml b/.github/workflows/style_type_checks.yml
index 8ad3c0fa68..48c2178edc 100644
--- a/.github/workflows/style_type_checks.yml
+++ b/.github/workflows/style_type_checks.yml
@@ -9,22 +9,16 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: extractions/setup-just@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Set up Python 3.8
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
+      - uses: actions/setup-python@v4
       - name: Install dependencies
         run: |
           pip install .
-          pip install click black mypy
-          pip install types-python-dateutil
-          pip install types-waitress
-          pip install types-PyYAML
-      - name: Style and type checks
-        run: |
-          just black
-          just mypy
+          # todo: install also `black[jupyter]`
+          pip install click "black==24.02" "mypy==1.8.0" \
+            types-python-dateutil types-waitress types-PyYAML
+      - name: Style check
+        run: just black
+      - name: Type check
+        run: just mypy
       - name: Check license headers
         run: just license
diff --git a/Justfile b/Justfile
index 1b4243f147..db6f48cedb 100644
--- a/Justfile
+++ b/Justfile
@@ -34,7 +34,7 @@ release:
     python setup.py sdist

 black:
-    black --check src test examples
+    black --check --color src test examples

 mypy:
     python setup.py type_check
diff --git a/examples/anomaly_detection.py b/examples/anomaly_detection.py
index bf74d5f95f..094404486f 100644
--- a/examples/anomaly_detection.py
+++ b/examples/anomaly_detection.py
@@ -14,6 +14,7 @@
 """
 This example shows how to do anomaly detection with DeepAR. The model is first
 trained and then time-points with the largest negative log-likelihood are plotted.
+
 """
 import numpy as np
 from itertools import islice
diff --git a/examples/warm_start.py b/examples/warm_start.py
index f8ca97eafd..fd425615a8 100644
--- a/examples/warm_start.py
+++ b/examples/warm_start.py
@@ -13,6 +13,7 @@
 """
 This example shows how to initialize the network with parameters from a model
 that was previously trained.
+
 """
 from gluonts.dataset.repository import get_dataset, dataset_recipes
diff --git a/pyproject.toml b/pyproject.toml
index 262389ee98..0e409fddbe 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,4 +1,5 @@
 [tool.black]
+target-version = ['py38']
 line-length = 79

 [tool.pytest.ini_options]
diff --git a/src/gluonts/ext/rotbaum/_model.py b/src/gluonts/ext/rotbaum/_model.py
index 3c0270a49a..7233df39c8 100644
--- a/src/gluonts/ext/rotbaum/_model.py
+++ b/src/gluonts/ext/rotbaum/_model.py
@@ -142,7 +142,7 @@ def fit(
         # XGBoost, but True if one uses lightgbm.
         model_is_already_trained: bool = False,  # True if there is no need to
         # train self.model
-        **kwargs
+        **kwargs,
     ):
         """
         Fits self.model and partitions R^n into cells.
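Nearly every hunk that follows is the same mechanical change: black 24, given a py36-or-later target, adds its "magic" trailing comma after `**kwargs` whenever it explodes a signature or call to one argument per line. A trailing comma after `*args` or `**kwargs` only became legal syntax in Python 3.6 (bpo-9232), which is why the pyproject.toml hunk above declares `target-version = ['py38']` rather than letting black guess per file. A minimal sketch of the resulting style; the function and parameter names are illustrative, not from this repository:

def train(
    data,
    epochs: int = 10,
    **kwargs,  # trailing comma: valid on Python >= 3.6, SyntaxError before
) -> None:
    ...
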
diff --git a/src/gluonts/ext/rotbaum/_preprocess.py b/src/gluonts/ext/rotbaum/_preprocess.py
index 6baee5be68..c45835a45e 100644
--- a/src/gluonts/ext/rotbaum/_preprocess.py
+++ b/src/gluonts/ext/rotbaum/_preprocess.py
@@ -49,7 +49,7 @@ def __init__(
         max_n_datapts: int = 400000,
         seed: Optional[int] = None,
         num_samples: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ):
         """
         Parameters
@@ -296,7 +296,7 @@ def __init__(
         one_hot_encode: bool = True,
         subtract_mean: bool = True,
         count_nans: bool = False,
-        **kwargs
+        **kwargs,
     ):
         if one_hot_encode:
             assert cardinality != "ignore" or (
@@ -313,7 +313,7 @@ def __init__(
             stratify_targets=stratify_targets,
             n_ignore_last=n_ignore_last,
             num_samples=num_samples,
-            **kwargs
+            **kwargs,
         )

         self.use_feat_static_real = use_feat_static_real
diff --git a/src/gluonts/model/evaluation.py b/src/gluonts/model/evaluation.py
index 15638dea84..473aa2397e 100644
--- a/src/gluonts/model/evaluation.py
+++ b/src/gluonts/model/evaluation.py
@@ -104,7 +104,7 @@ def evaluate_forecasts_raw(
     batch_size: int = 100,
     mask_invalid_label: bool = True,
     allow_nan_forecast: bool = False,
-    seasonality: Optional[int] = None
+    seasonality: Optional[int] = None,
 ) -> dict:
     """
     Evaluate ``forecasts`` by comparing them with ``test_data``, according
@@ -189,7 +189,7 @@ def evaluate_forecasts(
     batch_size: int = 100,
     mask_invalid_label: bool = True,
     allow_nan_forecast: bool = False,
-    seasonality: Optional[int] = None
+    seasonality: Optional[int] = None,
 ) -> pd.DataFrame:
     """
     Evaluate ``forecasts`` by comparing them with ``test_data``, according
@@ -243,7 +243,7 @@ def evaluate_model(
     batch_size: int = 100,
     mask_invalid_label: bool = True,
     allow_nan_forecast: bool = False,
-    seasonality: Optional[int] = None
+    seasonality: Optional[int] = None,
 ) -> pd.DataFrame:
     """
     Evaluate ``model`` when applied to ``test_data``, according
diff --git a/src/gluonts/model/forecast_generator.py b/src/gluonts/model/forecast_generator.py
index d66a1d361d..42caf1fa9e 100644
--- a/src/gluonts/model/forecast_generator.py
+++ b/src/gluonts/model/forecast_generator.py
@@ -94,7 +94,7 @@ def __call__(
         input_names: List[str],
         output_transform: Optional[OutputTransform],
         num_samples: Optional[int],
-        **kwargs
+        **kwargs,
     ) -> Iterator[Forecast]:
         raise NotImplementedError()

@@ -111,7 +111,7 @@ def __call__(
         input_names: List[str],
         output_transform: Optional[OutputTransform],
         num_samples: Optional[int],
-        **kwargs
+        **kwargs,
     ) -> Iterator[Forecast]:
         for batch in inference_data_loader:
             inputs = select(input_names, batch, ignore_missing=True)
@@ -155,7 +155,7 @@ def __call__(
         input_names: List[str],
         output_transform: Optional[OutputTransform],
         num_samples: Optional[int],
-        **kwargs
+        **kwargs,
     ) -> Iterator[Forecast]:
         for batch in inference_data_loader:
             inputs = select(input_names, batch, ignore_missing=True)
@@ -205,7 +205,7 @@ def __call__(
         input_names: List[str],
         output_transform: Optional[OutputTransform],
         num_samples: Optional[int],
-        **kwargs
+        **kwargs,
     ) -> Iterator[Forecast]:
         for batch in inference_data_loader:
             inputs = select(input_names, batch, ignore_missing=True)
diff --git a/src/gluonts/mx/block/regularization.py b/src/gluonts/mx/block/regularization.py
index aeb8719ed3..2716d81b57 100644
--- a/src/gluonts/mx/block/regularization.py
+++ b/src/gluonts/mx/block/regularization.py
@@ -49,7 +49,7 @@ def __init__(
         weight: Optional[float] = None,
         batch_axis: int = 1,
         time_axis: int = 0,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(weight, batch_axis, **kwargs)
         self._alpha = alpha
@@ -121,7 +121,7 @@ def __init__(
         weight: Optional[float] = None,
         batch_axis: int = 1,
         time_axis: int = 0,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(weight, batch_axis, **kwargs)
         self._beta = beta
diff --git a/src/gluonts/mx/block/scaler.py b/src/gluonts/mx/block/scaler.py
index 11eb0ba9b8..66b49b45f7 100644
--- a/src/gluonts/mx/block/scaler.py
+++ b/src/gluonts/mx/block/scaler.py
@@ -121,7 +121,7 @@ def __init__(
         minimum_scale: float = 1e-10,
         default_scale: Optional[float] = None,
         *args,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(*args, **kwargs)
         self.minimum_scale = minimum_scale
diff --git a/src/gluonts/mx/block/sndense.py b/src/gluonts/mx/block/sndense.py
index 906c7fd713..0e46fa4c14 100644
--- a/src/gluonts/mx/block/sndense.py
+++ b/src/gluonts/mx/block/sndense.py
@@ -51,7 +51,7 @@ def __init__(
         dtype="float32",
         num_power_iter: int = 1,
         ctx: Optional[mx.Context] = None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(**kwargs)
         self._coeff = coeff
diff --git a/src/gluonts/mx/distribution/iresnet.py b/src/gluonts/mx/distribution/iresnet.py
index bd92f6dcba..5a80c141ca 100644
--- a/src/gluonts/mx/distribution/iresnet.py
+++ b/src/gluonts/mx/distribution/iresnet.py
@@ -65,7 +65,7 @@ def __init__(
         coeff: float = 0.9,
         use_caching: bool = True,
         *args,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(*args, **kwargs)
         assert len(event_shape) == 1
diff --git a/src/gluonts/mx/distribution/lds.py b/src/gluonts/mx/distribution/lds.py
index 31d3ae225a..d7d7dfa295 100644
--- a/src/gluonts/mx/distribution/lds.py
+++ b/src/gluonts/mx/distribution/lds.py
@@ -65,7 +65,7 @@ def _safe_split(x, num_outputs, axis, squeeze_axis, *args, **kwargs):
             num_outputs=num_outputs,
             squeeze_axis=squeeze_axis,
             *args,
-            **kwargs
+            **kwargs,
         )
     return [x.squeeze(axis=axis)] if squeeze_axis else [x]

diff --git a/src/gluonts/nursery/SCott/dataset_tools/algo_clustering.py b/src/gluonts/nursery/SCott/dataset_tools/algo_clustering.py
index 0441113443..63009b7cc7 100644
--- a/src/gluonts/nursery/SCott/dataset_tools/algo_clustering.py
+++ b/src/gluonts/nursery/SCott/dataset_tools/algo_clustering.py
@@ -232,14 +232,14 @@ def KMeans_m5_dataset(
             {
                 "target": ts_slice,
                 "start": unsplit_start,
-            }  # , 'feat_static_cat': train_entry['feat_static_cat']}
-        )
+            }
+        )  # , 'feat_static_cat': train_entry['feat_static_cat']}
         whole_data.append(
             {
                 "target": ts_slice,
                 "start": unsplit_start,
-            }  # , 'feat_static_cat': train_entry['feat_static_cat']}
-        )
+        )  # , 'feat_static_cat': train_entry['feat_static_cat']}
+            }
         sample_id += 1
     print(len(whole_data))
     ret["group_ratio"] = [len(i) / len(whole_data) for i in dataset_group]
diff --git a/src/gluonts/nursery/daf/tslib/nn/attention/selfattn.py b/src/gluonts/nursery/daf/tslib/nn/attention/selfattn.py
index bf5d98a83b..0d8964d804 100644
--- a/src/gluonts/nursery/daf/tslib/nn/attention/selfattn.py
+++ b/src/gluonts/nursery/daf/tslib/nn/attention/selfattn.py
@@ -309,7 +309,7 @@ def forward(
         value: Tensor,
         shape: Tensor,
         *,
-        mask: Optional[BoolTensor] = None
+        mask: Optional[BoolTensor] = None,
     ) -> Tensor:
         q, k, v = self._compute_qkv(value, shape)
         score = self._compute_attn_score(q, k, mask)
diff --git a/src/gluonts/nursery/robust-mts-attack/pts/modules/gaussian_diffusion.py b/src/gluonts/nursery/robust-mts-attack/pts/modules/gaussian_diffusion.py
index 73195e7957..5666592254 100644
--- a/src/gluonts/nursery/robust-mts-attack/pts/modules/gaussian_diffusion.py
+++ b/src/gluonts/nursery/robust-mts-attack/pts/modules/gaussian_diffusion.py
@@ -312,7 +312,7 @@ def log_prob(self, x, cond, *args, **kwargs):
             cond.reshape(B * T, 1, -1),
             time,
             *args,
-            **kwargs
+            **kwargs,
         )

         return loss
diff --git a/src/gluonts/nursery/temporal_hierarchical_forecasting/model/cop_deepar/gnn.py b/src/gluonts/nursery/temporal_hierarchical_forecasting/model/cop_deepar/gnn.py
index f081630c04..8aeefc42f8 100644
--- a/src/gluonts/nursery/temporal_hierarchical_forecasting/model/cop_deepar/gnn.py
+++ b/src/gluonts/nursery/temporal_hierarchical_forecasting/model/cop_deepar/gnn.py
@@ -23,7 +23,7 @@ def __init__(
         num_layers: int,
         adj_matrix: Tensor,
         use_mlp: bool = True,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(**kwargs)

diff --git a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/ensemble_recommender.py b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/ensemble_recommender.py
index 811c9ee021..914e3bd217 100644
--- a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/ensemble_recommender.py
+++ b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/ensemble_recommender.py
@@ -100,7 +100,7 @@ def main(
                 surrogate[surrogate["name"]]
                 if surrogate["name"] in surrogate
                 else {}
-            )
+            ),
         )

     # Then, we can create the recommender
diff --git a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/recommender.py b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/recommender.py
index f15e065af7..9698c231b2 100644
--- a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/recommender.py
+++ b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/recommender.py
@@ -123,7 +123,7 @@ def main(
                 surrogate[surrogate["name"]]
                 if surrogate["name"] in surrogate
                 else {}
-            )
+            ),
         )
     elif recommender == "optimal":
         recommender_args["tracker"] = tracker
diff --git a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/surrogate.py b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/surrogate.py
index 8bcf74c9cd..e054e49e8a 100644
--- a/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/surrogate.py
+++ b/src/gluonts/nursery/tsbench/src/cli/analysis/scripts/surrogate.py
@@ -92,7 +92,7 @@ def main(
         input_flags=inputs,
         output_normalization=outputs["normalization"],
         impute_simulatable=outputs["imputation"],
-        **(_config[surrogate] if surrogate in _config else {})
+        **(_config[surrogate] if surrogate in _config else {}),
     )

     # And evaluate it
diff --git a/src/gluonts/torch/model/i_transformer/estimator.py b/src/gluonts/torch/model/i_transformer/estimator.py
index 541b16753c..71855820dd 100644
--- a/src/gluonts/torch/model/i_transformer/estimator.py
+++ b/src/gluonts/torch/model/i_transformer/estimator.py
@@ -232,7 +232,7 @@ def create_training_data_loader(
         data: Dataset,
         module: ITransformerLightningModule,
         shuffle_buffer_length: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ) -> Iterable:
         data = Cyclic(data).stream()
         instances = self._create_instance_splitter(module, "training").apply(
diff --git a/src/gluonts/torch/model/lag_tst/estimator.py b/src/gluonts/torch/model/lag_tst/estimator.py
index 9dac576ab6..7e8ad1e759 100644
--- a/src/gluonts/torch/model/lag_tst/estimator.py
+++ b/src/gluonts/torch/model/lag_tst/estimator.py
@@ -225,7 +225,7 @@ def create_training_data_loader(
         data: Dataset,
         module: LagTSTLightningModule,
         shuffle_buffer_length: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ) -> Iterable:
         data = Cyclic(data).stream()
         instances = self._create_instance_splitter(module, "training").apply(
diff --git a/src/gluonts/torch/model/patch_tst/estimator.py b/src/gluonts/torch/model/patch_tst/estimator.py
index c9e8c51e4f..94ae5b5444 100644
--- a/src/gluonts/torch/model/patch_tst/estimator.py
+++ b/src/gluonts/torch/model/patch_tst/estimator.py
@@ -229,7 +229,7 @@ def create_training_data_loader(
         data: Dataset,
         module: PatchTSTLightningModule,
         shuffle_buffer_length: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ) -> Iterable:
         data = Cyclic(data).stream()
         instances = self._create_instance_splitter(module, "training").apply(
diff --git a/src/gluonts/torch/model/wavenet/estimator.py b/src/gluonts/torch/model/wavenet/estimator.py
index 234ecff237..4a5b2ff0b1 100644
--- a/src/gluonts/torch/model/wavenet/estimator.py
+++ b/src/gluonts/torch/model/wavenet/estimator.py
@@ -348,7 +348,7 @@ def create_training_data_loader(
         data: Dataset,
         module: WaveNetLightningModule,
         shuffle_buffer_length: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ) -> Iterable:
         data = Cyclic(data).stream()
         instances = self._create_instance_splitter("training").apply(
diff --git a/test/nursery/sagemaker_sdk/test_entry_point_scripts.py b/test/nursery/sagemaker_sdk/test_entry_point_scripts.py
index 7944700fbb..5e49244268 100644
--- a/test/nursery/sagemaker_sdk/test_entry_point_scripts.py
+++ b/test/nursery/sagemaker_sdk/test_entry_point_scripts.py
@@ -91,7 +91,7 @@ def test_train_script(dataset_name, custom_dataset):
     estimator = estimator_cls.from_hyperparameters(
         prediction_length=dataset.metadata.prediction_length,
         freq=dataset.metadata.freq,
-        **hyperparameters
+        **hyperparameters,
    )
     serialized = serde.dump_json(estimator)
     with open(temp_dir_path / "estimator.json", "w") as estimator_file:
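To reproduce the reformatting above locally without running the full CI job, black's Python API can be called directly. A minimal sketch, assuming only that `black==24.02` is installed as pinned in the workflow; `black.Mode`, `black.TargetVersion`, and `black.format_str` are real entry points but are not covered by black's stability policy, which is one more argument for the version pin:

import black

# A def line too long for one 79-column line, as in the estimator hunks.
# The annotations are just text to be formatted; they are never resolved.
SRC = (
    "def create_training_data_loader(self, data: Dataset,"
    " module: WaveNetLightningModule,"
    " shuffle_buffer_length: Optional[int] = None, **kwargs) -> Iterable:\n"
    "    ...\n"
)

# Mirror pyproject.toml: py38 target, 79-character lines.
mode = black.Mode(target_versions={black.TargetVersion.PY38}, line_length=79)

# Black splits the arguments one per line and, because the target is
# >= py36, appends the trailing comma after **kwargs.
print(black.format_str(SRC, mode=mode))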