From 05c305b49d2600a3df34994b0c81c1a49e4c5a35 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 28 Mar 2023 19:54:52 +0200
Subject: [PATCH] [pre-commit.ci] pre-commit suggestions (#958)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [pre-commit.ci] pre-commit suggestions

updates:
- [github.com/PyCQA/docformatter: v1.5.0 → v1.5.1](https://github.com/PyCQA/docformatter/compare/v1.5.0...v1.5.1)
- [github.com/PyCQA/isort: 5.11.2 → 5.11.4](https://github.com/PyCQA/isort/compare/5.11.2...5.11.4)

* config

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <6035284+Borda@users.noreply.github.com>
Co-authored-by: Jirka
---
 .pre-commit-config.yaml                                |  6 +++---
 pl_bolts/callbacks/data_monitor.py                     |  3 ---
 pl_bolts/datamodules/experience_source.py              |  1 -
 pl_bolts/datamodules/kitti_datamodule.py               |  1 -
 pl_bolts/datamodules/sklearn_datamodule.py             |  1 -
 pl_bolts/datamodules/ssl_imagenet_datamodule.py        |  1 -
 pl_bolts/datamodules/vision_datamodule.py              |  1 -
 pl_bolts/datasets/base_dataset.py                      |  1 -
 pl_bolts/datasets/ssl_amdim_datasets.py                |  3 ---
 pl_bolts/losses/self_supervised_learning.py            | 10 +++-------
 .../models/detection/faster_rcnn/faster_rcnn_module.py |  1 -
 .../models/detection/retinanet/retinanet_module.py     |  2 --
 pl_bolts/models/gans/pix2pix/pix2pix_module.py         |  1 -
 pl_bolts/models/mnist_module.py                        |  1 -
 pl_bolts/models/rl/per_dqn_model.py                    |  8 +++++++-
 pl_bolts/models/rl/reinforce_model.py                  |  1 -
 pl_bolts/models/rl/vanilla_policy_gradient_model.py    |  1 -
 pl_bolts/models/self_supervised/byol/byol_module.py    |  1 -
 pl_bolts/models/self_supervised/byol/models.py         |  2 --
 pl_bolts/models/self_supervised/moco/moco2_module.py   |  1 -
 pl_bolts/models/self_supervised/simclr/transforms.py   |  2 --
 .../models/self_supervised/simsiam/simsiam_module.py   |  1 -
 pl_bolts/models/self_supervised/swav/transforms.py     |  1 -
 pl_bolts/models/vision/unet.py                         |  1 -
 pl_bolts/utils/arguments.py                            |  3 ---
 tests/datamodules/test_experience_sources.py           |  2 --
 tests/datasets/test_datasets.py                        |  1 -
 tests/losses/test_rl_loss.py                           |  1 -
 tests/models/rl/unit/test_agents.py                    |  1 -
 tests/utils/test_arguments.py                          |  2 --
 30 files changed, 13 insertions(+), 49 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 79062e7946..af57fe458e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
         name: Upgrade code

   - repo: https://github.com/PyCQA/docformatter
-    rev: v1.5.0
+    rev: v1.5.1
     hooks:
       - id: docformatter
         args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]
@@ -45,12 +45,12 @@ repos:
     exclude: CHANGELOG.md

   - repo: https://github.com/PyCQA/isort
-    rev: 5.11.2
+    rev: 5.12.0
     hooks:
       - id: isort

   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       - id: black
         name: Format code
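Note that the bot's bullets above stop at isort 5.11.4, but the squashed diff (via the follow-up "config" commit) lands isort on 5.12.0 and also bumps black from 22.12.0 to 23.1.0. The black bump drives nearly every Python hunk below: judging by these hunks, black's 2023 stable style removes a blank line sitting directly after a `class` statement or function signature. A minimal, self-contained way to observe the rule — the snippet is illustrative, assumes `black>=23.1` is installed, and uses black's public `format_str`/`Mode` API:

```python
# Observe the style rule behind most hunks in this patch: feed black a class
# whose first body line is preceded by a blank line and print the result.
import black

src = 'class TrainingDataMonitor(DataMonitorBase):\n\n    GROUP_NAME = "training_step"\n'
print(black.format_str(src, mode=black.Mode()))
# Expected under the 2023 stable style: the blank line directly after the
# class statement is removed.
```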
diff --git a/pl_bolts/callbacks/data_monitor.py b/pl_bolts/callbacks/data_monitor.py
index 96537962cd..95879847d4 100644
--- a/pl_bolts/callbacks/data_monitor.py
+++ b/pl_bolts/callbacks/data_monitor.py
@@ -22,7 +22,6 @@

 @under_review()
 class DataMonitorBase(Callback):
-
     supported_loggers = (
         TensorBoardLogger,
         WandbLogger,
@@ -113,7 +112,6 @@ def _is_logger_available(self, logger: LightningLoggerBase) -> bool:

 @under_review()
 class ModuleDataMonitor(DataMonitorBase):
-
     GROUP_NAME_INPUT = "input"
     GROUP_NAME_OUTPUT = "output"

@@ -199,7 +197,6 @@ def hook(_: Module, inp: Sequence, out: Sequence) -> None:

 @under_review()
 class TrainingDataMonitor(DataMonitorBase):
-
     GROUP_NAME = "training_step"

     def __init__(self, log_every_n_steps: int = None):
diff --git a/pl_bolts/datamodules/experience_source.py b/pl_bolts/datamodules/experience_source.py
index 840387e3e7..13f5f85fec 100644
--- a/pl_bolts/datamodules/experience_source.py
+++ b/pl_bolts/datamodules/experience_source.py
@@ -103,7 +103,6 @@ def runner(self, device: torch.device) -> Tuple[Experience]:

         # step through each env
         for env_idx, (env, action) in enumerate(zip(self.pool, actions)):
-
             exp = self.env_step(env_idx, env, action)
             history = self.histories[env_idx]
             history.append(exp)
diff --git a/pl_bolts/datamodules/kitti_datamodule.py b/pl_bolts/datamodules/kitti_datamodule.py
index 00f45ee11b..67a0510f49 100644
--- a/pl_bolts/datamodules/kitti_datamodule.py
+++ b/pl_bolts/datamodules/kitti_datamodule.py
@@ -19,7 +19,6 @@

 @under_review()
 class KittiDataModule(LightningDataModule):
-
     name = "kitti"

     def __init__(
diff --git a/pl_bolts/datamodules/sklearn_datamodule.py b/pl_bolts/datamodules/sklearn_datamodule.py
index f86370517b..a40ade07b2 100644
--- a/pl_bolts/datamodules/sklearn_datamodule.py
+++ b/pl_bolts/datamodules/sklearn_datamodule.py
@@ -117,7 +117,6 @@ def __init__(
         *args,
         **kwargs,
     ) -> None:
-
         super().__init__(*args, **kwargs)
         self.num_workers = num_workers
         self.batch_size = batch_size
diff --git a/pl_bolts/datamodules/ssl_imagenet_datamodule.py b/pl_bolts/datamodules/ssl_imagenet_datamodule.py
index ec5481ebad..30ee567867 100644
--- a/pl_bolts/datamodules/ssl_imagenet_datamodule.py
+++ b/pl_bolts/datamodules/ssl_imagenet_datamodule.py
@@ -18,7 +18,6 @@

 @under_review()
 class SSLImagenetDataModule(LightningDataModule):  # pragma: no cover
-
     name = "imagenet"

     def __init__(
diff --git a/pl_bolts/datamodules/vision_datamodule.py b/pl_bolts/datamodules/vision_datamodule.py
index e9ad10db5e..204ebf3aef 100644
--- a/pl_bolts/datamodules/vision_datamodule.py
+++ b/pl_bolts/datamodules/vision_datamodule.py
@@ -8,7 +8,6 @@


 class VisionDataModule(LightningDataModule):
-
     EXTRA_ARGS: dict = {}
     name: str = ""
     #: Dataset class to use
diff --git a/pl_bolts/datasets/base_dataset.py b/pl_bolts/datasets/base_dataset.py
index 5b91c8791c..539dbed565 100644
--- a/pl_bolts/datasets/base_dataset.py
+++ b/pl_bolts/datasets/base_dataset.py
@@ -16,7 +16,6 @@

 @under_review()
 class LightDataset(ABC, Dataset):
-
     data: Tensor
     targets: Tensor
     normalize: tuple
diff --git a/pl_bolts/datasets/ssl_amdim_datasets.py b/pl_bolts/datasets/ssl_amdim_datasets.py
index 71e303c1b1..b2aa532f28 100644
--- a/pl_bolts/datasets/ssl_amdim_datasets.py
+++ b/pl_bolts/datasets/ssl_amdim_datasets.py
@@ -30,7 +30,6 @@ def generate_train_val_split(cls, examples, labels, pct_val):

         cts = {x: 0 for x in range(nb_classes)}
         for img, class_idx in zip(examples, labels):
-
             # allow labeled
             if cts[class_idx] < nb_val_images:
                 val_x.append(img)
@@ -60,7 +59,6 @@ def select_nb_imgs_per_class(cls, examples, labels, nb_imgs_in_val):

         cts = {x: 0 for x in range(nb_classes)}
         for img_name, class_idx in zip(examples, labels):
-
             # allow labeled
             if cts[class_idx] < nb_imgs_in_val:
                 labeled.append(img_name)
@@ -76,7 +74,6 @@ def select_nb_imgs_per_class(cls, examples, labels, nb_imgs_in_val):

     @classmethod
     def deterministic_shuffle(cls, x, y):
-
         n = len(x)
         idxs = list(range(0, n))
         np.random.seed(1234)
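The last hunk above touches `deterministic_shuffle`, which pins NumPy's global seed before shuffling. A self-contained sketch of that pattern; only the three lines visible in the hunk come from the repo, the rest of the body is an assumed continuation:

```python
# Seeded-shuffle pattern from the ssl_amdim hunk: fixing the global NumPy
# seed makes the index permutation, and hence the data split, reproducible.
import numpy as np


def deterministic_shuffle(x, y):
    n = len(x)
    idxs = list(range(0, n))
    np.random.seed(1234)
    np.random.shuffle(idxs)  # assumed continuation; not shown in the hunk
    return [x[i] for i in idxs], [y[i] for i in idxs]


print(deterministic_shuffle(list("abcd"), [1, 2, 3, 4]))  # same output every run
```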
diff --git a/pl_bolts/losses/self_supervised_learning.py b/pl_bolts/losses/self_supervised_learning.py
index d82651cc03..01bb3233f6 100644
--- a/pl_bolts/losses/self_supervised_learning.py
+++ b/pl_bolts/losses/self_supervised_learning.py
@@ -140,11 +140,8 @@ def forward(self, anchor_representations, positive_representations, mask_mat):
         # trick 2: tanh clip
         raw_scores = tanh_clip(raw_scores, clip_val=self.tclip)

-        """
-        pos_scores includes scores for all the positive samples
-        neg_scores includes scores for all the negative samples, with
-        scores for positive samples set to the min score (-self.tclip here)
-        """
+        """pos_scores includes scores for all the positive samples neg_scores includes scores for all the negative
+        samples, with scores for positive samples set to the min score (-self.tclip here)"""
         # ----------------------
         # EXTRACT POSITIVE SCORES
         # use the index mask to pull all the diagonals which are b1 x b1
@@ -337,8 +334,7 @@ def forward(self, anchor_maps, positive_maps):

         regularizer = 0
         losses = []
-        for (ai, pi) in self.map_indexes:
-
+        for ai, pi in self.map_indexes:
             # choose a random map
             if ai == -1:
                 ai = np.random.randint(0, len(anchor_maps))
diff --git a/pl_bolts/models/detection/faster_rcnn/faster_rcnn_module.py b/pl_bolts/models/detection/faster_rcnn/faster_rcnn_module.py
index f9513d6c35..baaee68e20 100644
--- a/pl_bolts/models/detection/faster_rcnn/faster_rcnn_module.py
+++ b/pl_bolts/models/detection/faster_rcnn/faster_rcnn_module.py
@@ -113,7 +113,6 @@ def forward(self, x):
         return self.model(x)

     def training_step(self, batch, batch_idx):
-
         images, targets = batch
         targets = [{k: v for k, v in t.items()} for t in targets]

diff --git a/pl_bolts/models/detection/retinanet/retinanet_module.py b/pl_bolts/models/detection/retinanet/retinanet_module.py
index c042f42aa6..d1711707ba 100644
--- a/pl_bolts/models/detection/retinanet/retinanet_module.py
+++ b/pl_bolts/models/detection/retinanet/retinanet_module.py
@@ -9,7 +9,6 @@
 from pl_bolts.utils.warnings import warn_missing_pkg

 if _TORCHVISION_AVAILABLE:
-
     from torchvision.models.detection.retinanet import RetinaNet as torchvision_RetinaNet
     from torchvision.models.detection.retinanet import RetinaNetHead, retinanet_resnet50_fpn
     from torchvision.ops import box_iou
@@ -97,7 +96,6 @@ def forward(self, x):
         return self.model(x)

     def training_step(self, batch, batch_idx):
-
         images, targets = batch
         targets = [{k: v for k, v in t.items()} for t in targets]

diff --git a/pl_bolts/models/gans/pix2pix/pix2pix_module.py b/pl_bolts/models/gans/pix2pix/pix2pix_module.py
index fd0a615c1b..9228432e81 100644
--- a/pl_bolts/models/gans/pix2pix/pix2pix_module.py
+++ b/pl_bolts/models/gans/pix2pix/pix2pix_module.py
@@ -18,7 +18,6 @@ def _weights_init(m):

 @under_review()
 class Pix2Pix(LightningModule):
     def __init__(self, in_channels, out_channels, learning_rate=0.0002, lambda_recon=200):
-
         super().__init__()
         self.save_hyperparameters()
diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py
index 1e470ea579..fd7a7cd414 100644
--- a/pl_bolts/models/mnist_module.py
+++ b/pl_bolts/models/mnist_module.py
@@ -27,7 +27,6 @@ class LitMNIST(LightningModule):
     """

     def __init__(self, hidden_dim: int = 128, learning_rate: float = 1e-3, **kwargs: Any) -> None:
-
         if not _TORCHVISION_AVAILABLE:  # pragma: no cover
             raise ModuleNotFoundError("You want to use `torchvision` which is not installed yet.")

diff --git a/pl_bolts/models/rl/per_dqn_model.py b/pl_bolts/models/rl/per_dqn_model.py
index f20bc433e3..dbf30a04cc 100644
--- a/pl_bolts/models/rl/per_dqn_model.py
+++ b/pl_bolts/models/rl/per_dqn_model.py
@@ -91,7 +91,13 @@ def train_batch(
         states, actions, rewards, dones, new_states = samples

         for idx, _ in enumerate(dones):
-            yield (states[idx], actions[idx], rewards[idx], dones[idx], new_states[idx],), indices[
+            yield (
+                states[idx],
+                actions[idx],
+                rewards[idx],
+                dones[idx],
+                new_states[idx],
+            ), indices[
                 idx
             ], weights[idx]
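The reflowed `yield` above is purely cosmetic: each item produced by `train_batch` is still an experience 5-tuple paired with a replay-buffer index and an importance-sampling weight. A runnable stand-in generator with dummy tensors (not the pl_bolts model) that mirrors the yielded structure:

```python
# Shape of the items train_batch() yields after the reflow: an experience
# 5-tuple, then the buffer index, then the IS weight. Dummy data throughout.
import torch


def train_batch_stub():
    for idx in range(3):
        experience = (
            torch.zeros(4, 84, 84),  # state
            torch.tensor(0),         # action
            torch.tensor(1.0),       # reward
            torch.tensor(False),     # done
            torch.zeros(4, 84, 84),  # new state
        )
        yield experience, idx, 1.0


for (state, action, reward, done, new_state), idx, weight in train_batch_stub():
    print(idx, weight, state.shape)  # a consumer unpacks all three parts
```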
diff --git a/pl_bolts/models/rl/reinforce_model.py b/pl_bolts/models/rl/reinforce_model.py
index 52e1b3f88e..b3eb5d8f19 100644
--- a/pl_bolts/models/rl/reinforce_model.py
+++ b/pl_bolts/models/rl/reinforce_model.py
@@ -173,7 +173,6 @@ def train_batch(
         """

         while True:
-
             action = self.agent(self.state, self.device)

             next_state, reward, done, _ = self.env.step(action[0])
diff --git a/pl_bolts/models/rl/vanilla_policy_gradient_model.py b/pl_bolts/models/rl/vanilla_policy_gradient_model.py
index 7a2909e722..b86b1134aa 100644
--- a/pl_bolts/models/rl/vanilla_policy_gradient_model.py
+++ b/pl_bolts/models/rl/vanilla_policy_gradient_model.py
@@ -132,7 +132,6 @@ def train_batch(
         """

         while True:
-
             action = self.agent(self.state, self.device)

             next_state, reward, done, _ = self.env.step(action[0])
diff --git a/pl_bolts/models/self_supervised/byol/byol_module.py b/pl_bolts/models/self_supervised/byol/byol_module.py
index 0d2f7fd8a8..679ea2c685 100644
--- a/pl_bolts/models/self_supervised/byol/byol_module.py
+++ b/pl_bolts/models/self_supervised/byol/byol_module.py
@@ -74,7 +74,6 @@ def __init__(
         initial_tau: float = 0.996,
         **kwargs: Any,
     ) -> None:
-
         super().__init__()

         self.save_hyperparameters(ignore="base_encoder")
diff --git a/pl_bolts/models/self_supervised/byol/models.py b/pl_bolts/models/self_supervised/byol/models.py
index 7d6168cc34..497fa112bb 100644
--- a/pl_bolts/models/self_supervised/byol/models.py
+++ b/pl_bolts/models/self_supervised/byol/models.py
@@ -18,7 +18,6 @@ class MLP(nn.Module):
     """

     def __init__(self, input_dim: int = 2048, hidden_dim: int = 4096, output_dim: int = 256) -> None:
-
         super().__init__()

         self.model = nn.Sequential(
@@ -53,7 +52,6 @@ def __init__(
         projector_hidden_dim: int = 4096,
         projector_out_dim: int = 256,
     ) -> None:
-
         super().__init__()

         if isinstance(encoder, str):
diff --git a/pl_bolts/models/self_supervised/moco/moco2_module.py b/pl_bolts/models/self_supervised/moco/moco2_module.py
index 75ae274c9b..009ab622c4 100644
--- a/pl_bolts/models/self_supervised/moco/moco2_module.py
+++ b/pl_bolts/models/self_supervised/moco/moco2_module.py
@@ -225,7 +225,6 @@ def forward(self, img_q, img_k, queue):

         # compute key features
         with torch.no_grad():  # no gradient to keys
-
             # shuffle for making use of BN
             if self._use_ddp(self.trainer):
                 img_k, idx_unshuffle = self._batch_shuffle_ddp(img_k)
diff --git a/pl_bolts/models/self_supervised/simclr/transforms.py b/pl_bolts/models/self_supervised/simclr/transforms.py
index 37eccfd6c6..ebb4e54278 100644
--- a/pl_bolts/models/self_supervised/simclr/transforms.py
+++ b/pl_bolts/models/self_supervised/simclr/transforms.py
@@ -31,7 +31,6 @@ class SimCLRTrainDataTransform:
     def __init__(
         self, input_height: int = 224, gaussian_blur: bool = True, jitter_strength: float = 1.0, normalize=None
     ) -> None:
-
         if not _TORCHVISION_AVAILABLE:  # pragma: no cover
             raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.")

@@ -140,7 +139,6 @@ class SimCLRFinetuneTransform(SimCLRTrainDataTransform):
     def __init__(
         self, input_height: int = 224, jitter_strength: float = 1.0, normalize=None, eval_transform: bool = False
     ) -> None:
-
         super().__init__(
             normalize=normalize, input_height=input_height, gaussian_blur=None, jitter_strength=jitter_strength
         )
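The two SimCLR hunks only delete post-signature blank lines; the constructor signatures themselves are unchanged. A hedged usage sketch built from those signatures — it assumes torchvision and PIL are installed (the guard in the first hunk raises otherwise) and that the transform object is called on a PIL image, as is the usual pl_bolts pattern:

```python
# Usage sketch for SimCLRTrainDataTransform based on the signature shown in
# the hunk above; calling it yields augmented views of the input image.
from PIL import Image

from pl_bolts.models.self_supervised.simclr.transforms import SimCLRTrainDataTransform

transform = SimCLRTrainDataTransform(input_height=224, gaussian_blur=True, jitter_strength=1.0)
views = transform(Image.new("RGB", (256, 256)))  # augmented views of one image
```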
diff --git a/pl_bolts/models/self_supervised/simsiam/simsiam_module.py b/pl_bolts/models/self_supervised/simsiam/simsiam_module.py
index 4ea1afbecc..82eb65a428 100644
--- a/pl_bolts/models/self_supervised/simsiam/simsiam_module.py
+++ b/pl_bolts/models/self_supervised/simsiam/simsiam_module.py
@@ -75,7 +75,6 @@ def __init__(
         exclude_bn_bias: bool = False,
         **kwargs,
     ) -> None:
-
         super().__init__()
         self.save_hyperparameters(ignore="base_encoder")

diff --git a/pl_bolts/models/self_supervised/swav/transforms.py b/pl_bolts/models/self_supervised/swav/transforms.py
index 2047563a50..10238bff9a 100644
--- a/pl_bolts/models/self_supervised/swav/transforms.py
+++ b/pl_bolts/models/self_supervised/swav/transforms.py
@@ -130,7 +130,6 @@ class SwAVFinetuneTransform:
     def __init__(
         self, input_height: int = 224, jitter_strength: float = 1.0, normalize=None, eval_transform: bool = False
     ) -> None:
-
         self.jitter_strength = jitter_strength
         self.input_height = input_height
         self.normalize = normalize
diff --git a/pl_bolts/models/vision/unet.py b/pl_bolts/models/vision/unet.py
index bd3fe1af20..66233d155b 100644
--- a/pl_bolts/models/vision/unet.py
+++ b/pl_bolts/models/vision/unet.py
@@ -32,7 +32,6 @@ def __init__(
         features_start: int = 64,
         bilinear: bool = False,
     ):
-
         if num_layers < 1:
             raise ValueError(f"num_layers = {num_layers}, expected: num_layers > 0")

diff --git a/pl_bolts/utils/arguments.py b/pl_bolts/utils/arguments.py
index 706e03b372..c0d45f734e 100644
--- a/pl_bolts/utils/arguments.py
+++ b/pl_bolts/utils/arguments.py
@@ -90,14 +90,11 @@ def gather_lit_args(cls: Any, root_cls: Optional[Any] = None) -> List[LitArg]:
     arguments: List[LitArg] = []
     argument_names = []
     for obj in inspect.getmro(cls):
-
         if obj is root_cls and len(arguments) > 0:
             break

         if issubclass(obj, root_cls):
-
             default_params = inspect.signature(obj.__init__).parameters  # type: ignore
-
             for arg in default_params:
                 arg_type = default_params[arg].annotation
                 arg_default = default_params[arg].default
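`gather_lit_args` above walks the class hierarchy and reads constructor parameters; the hunk only strips blank lines from it. A standalone illustration of the same introspection pattern, using made-up demo classes rather than anything from the repo:

```python
# Walk the MRO with inspect.getmro and harvest each constructor's parameter
# annotations and defaults via inspect.signature, as gather_lit_args does.
import inspect


class Base:
    def __init__(self, a: int = 1):
        self.a = a


class Child(Base):
    def __init__(self, b: str = "x"):
        super().__init__()
        self.b = b


for obj in inspect.getmro(Child):
    if obj is object:
        continue
    for name, param in inspect.signature(obj.__init__).parameters.items():
        if name != "self":
            print(obj.__name__, name, param.annotation, param.default)
```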
diff --git a/tests/datamodules/test_experience_sources.py b/tests/datamodules/test_experience_sources.py
index 3a3cc6e259..db66968c59 100644
--- a/tests/datamodules/test_experience_sources.py
+++ b/tests/datamodules/test_experience_sources.py
@@ -185,7 +185,6 @@ def test_source_is_done_2step_episode(self):
         self.source.histories[0].append(self.exp1)

         for idx, exp in enumerate(self.source.runner(self.device)):
-
             self.assertTrue(isinstance(exp, tuple))

             if idx == 0:
@@ -211,7 +210,6 @@ def test_source_is_done_metrics(self):
         history += [self.exp1, self.exp2, self.exp2]

         for idx, exp in enumerate(self.source.runner(self.device)):
-
             if idx == n_steps - 1:
                 self.assertEqual(self.source._total_rewards[0], 1)
                 self.assertEqual(self.source.total_steps[0], 1)
diff --git a/tests/datasets/test_datasets.py b/tests/datasets/test_datasets.py
index 7842323b5e..0f36a03782 100644
--- a/tests/datasets/test_datasets.py
+++ b/tests/datasets/test_datasets.py
@@ -29,7 +29,6 @@

 @pytest.mark.parametrize("batch_size,num_samples", [(16, 100), (1, 0)])
 def test_dummy_ds(catch_warnings, batch_size, num_samples):
     if num_samples > 0:
-
         ds = DummyDataset((1, 28, 28), (1,), num_samples=num_samples)
         dl = DataLoader(ds, batch_size=batch_size)
diff --git a/tests/losses/test_rl_loss.py b/tests/losses/test_rl_loss.py
index bffe51a573..887259ca24 100644
--- a/tests/losses/test_rl_loss.py
+++ b/tests/losses/test_rl_loss.py
@@ -13,7 +13,6 @@

 class TestRLLoss(TestCase):
     def setUp(self) -> None:
-
         self.state = torch.rand(32, 4, 84, 84)
         self.next_state = torch.rand(32, 4, 84, 84)
         self.action = torch.ones([32])
diff --git a/tests/models/rl/unit/test_agents.py b/tests/models/rl/unit/test_agents.py
index 3ed66c763e..4f92b0a0cf 100644
--- a/tests/models/rl/unit/test_agents.py
+++ b/tests/models/rl/unit/test_agents.py
@@ -31,7 +31,6 @@ def setUp(self) -> None:
         self.value_agent = ValueAgent(self.net, self.env.action_space.n)

     def test_value_agent(self):
-
         action = self.value_agent(self.state, self.device)
         self.assertIsInstance(action, list)
         self.assertIsInstance(action[0], int)
diff --git a/tests/utils/test_arguments.py b/tests/utils/test_arguments.py
index fc067915a5..3b118e57e6 100644
--- a/tests/utils/test_arguments.py
+++ b/tests/utils/test_arguments.py
@@ -7,7 +7,6 @@


 class DummyParentModel(LightningModule):
-
     name = "parent-model"

     def __init__(self, a: int, b: str, c: str = "parent_model_c"):
@@ -19,7 18,6 @@ def forward(self, x):


 class DummyParentDataModule(LightningDataModule):
-
     name = "parent-dm"

     def __init__(self, d: str, c: str = "parent_dm_c"):