From 0cab7a0b65d75ad645cc0b063b2f69950c6da718 Mon Sep 17 00:00:00 2001 From: Shion Date: Tue, 11 Oct 2022 08:13:45 -0400 Subject: [PATCH 01/19] Move SSL transforms to pl_bolts/transforms --- pl_bolts/models/self_supervised/amdim/__init__.py | 2 +- pl_bolts/models/self_supervised/amdim/datasets.py | 2 +- pl_bolts/models/self_supervised/cpc/__init__.py | 2 +- pl_bolts/models/self_supervised/cpc/cpc_finetuner.py | 2 +- pl_bolts/models/self_supervised/cpc/cpc_module.py | 2 +- pl_bolts/models/self_supervised/moco/__init__.py | 2 +- pl_bolts/models/self_supervised/moco/moco2_module.py | 2 +- pl_bolts/models/self_supervised/simclr/__init__.py | 2 +- .../models/self_supervised/simclr/simclr_finetuner.py | 2 +- pl_bolts/models/self_supervised/simclr/simclr_module.py | 2 +- pl_bolts/models/self_supervised/swav/__init__.py | 2 +- pl_bolts/models/self_supervised/swav/swav_finetuner.py | 2 +- pl_bolts/models/self_supervised/swav/swav_module.py | 2 +- .../self_supervised/amdim_transforms.py} | 0 .../self_supervised/cpc_transforms.py} | 0 .../self_supervised/moco_transforms.py} | 0 .../self_supervised/simclr_transforms.py} | 0 .../self_supervised/swav_transforms.py} | 0 tests/models/self_supervised/test_models.py | 6 +++--- tests/models/self_supervised/unit/test_transforms.py | 2 +- tests/transforms/test_transforms.py | 8 ++++---- 21 files changed, 21 insertions(+), 21 deletions(-) rename pl_bolts/{models/self_supervised/amdim/transforms.py => transforms/self_supervised/amdim_transforms.py} (100%) rename pl_bolts/{models/self_supervised/cpc/transforms.py => transforms/self_supervised/cpc_transforms.py} (100%) rename pl_bolts/{models/self_supervised/moco/transforms.py => transforms/self_supervised/moco_transforms.py} (100%) rename pl_bolts/{models/self_supervised/simclr/transforms.py => transforms/self_supervised/simclr_transforms.py} (100%) rename pl_bolts/{models/self_supervised/swav/transforms.py => transforms/self_supervised/swav_transforms.py} (100%) diff --git 
a/pl_bolts/models/self_supervised/amdim/__init__.py b/pl_bolts/models/self_supervised/amdim/__init__.py index e3dec40752..5bce3f24b6 100644 --- a/pl_bolts/models/self_supervised/amdim/__init__.py +++ b/pl_bolts/models/self_supervised/amdim/__init__.py @@ -1,6 +1,6 @@ from pl_bolts.models.self_supervised.amdim.amdim_module import AMDIM from pl_bolts.models.self_supervised.amdim.networks import AMDIMEncoder -from pl_bolts.models.self_supervised.amdim.transforms import ( +from pl_bolts.transforms.self_supervised.amdim_transforms import ( AMDIMEvalTransformsCIFAR10, AMDIMEvalTransformsImageNet128, AMDIMEvalTransformsSTL10, diff --git a/pl_bolts/models/self_supervised/amdim/datasets.py b/pl_bolts/models/self_supervised/amdim/datasets.py index 8eb5021fc0..bdb87e2e3a 100644 --- a/pl_bolts/models/self_supervised/amdim/datasets.py +++ b/pl_bolts/models/self_supervised/amdim/datasets.py @@ -3,7 +3,7 @@ from torch.utils.data import random_split from pl_bolts.datasets import CIFAR10Mixed, UnlabeledImagenet -from pl_bolts.models.self_supervised.amdim import transforms as amdim_transforms +from pl_bolts.transforms.self_supervised import amdim_transforms as amdim_transforms from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg diff --git a/pl_bolts/models/self_supervised/cpc/__init__.py b/pl_bolts/models/self_supervised/cpc/__init__.py index bbdbc67d8f..de39f93f18 100644 --- a/pl_bolts/models/self_supervised/cpc/__init__.py +++ b/pl_bolts/models/self_supervised/cpc/__init__.py @@ -1,6 +1,6 @@ from pl_bolts.models.self_supervised.cpc.cpc_module import CPC_v2 from pl_bolts.models.self_supervised.cpc.networks import cpc_resnet50, cpc_resnet101 -from pl_bolts.models.self_supervised.cpc.transforms import ( +from pl_bolts.transforms.self_supervised.cpc_transforms import ( CPCEvalTransformsCIFAR10, CPCEvalTransformsImageNet128, CPCEvalTransformsSTL10, diff --git 
a/pl_bolts/models/self_supervised/cpc/cpc_finetuner.py b/pl_bolts/models/self_supervised/cpc/cpc_finetuner.py index 145b970fd9..7d66cc772f 100644 --- a/pl_bolts/models/self_supervised/cpc/cpc_finetuner.py +++ b/pl_bolts/models/self_supervised/cpc/cpc_finetuner.py @@ -4,7 +4,7 @@ from pytorch_lightning import Trainer, seed_everything from pl_bolts.models.self_supervised import CPC_v2, SSLFineTuner -from pl_bolts.models.self_supervised.cpc.transforms import ( +from pl_bolts.transforms.self_supervised.cpc_transforms import ( CPCEvalTransformsCIFAR10, CPCEvalTransformsSTL10, CPCTrainTransformsCIFAR10, diff --git a/pl_bolts/models/self_supervised/cpc/cpc_module.py b/pl_bolts/models/self_supervised/cpc/cpc_module.py index 8add2287dd..1b8c01c52d 100644 --- a/pl_bolts/models/self_supervised/cpc/cpc_module.py +++ b/pl_bolts/models/self_supervised/cpc/cpc_module.py @@ -14,7 +14,7 @@ from pl_bolts.datamodules.stl10_datamodule import STL10DataModule from pl_bolts.losses.self_supervised_learning import CPCTask from pl_bolts.models.self_supervised.cpc.networks import cpc_resnet101 -from pl_bolts.models.self_supervised.cpc.transforms import ( +from pl_bolts.transforms.self_supervised.cpc_transforms import ( CPCEvalTransformsCIFAR10, CPCEvalTransformsImageNet128, CPCEvalTransformsSTL10, diff --git a/pl_bolts/models/self_supervised/moco/__init__.py b/pl_bolts/models/self_supervised/moco/__init__.py index 8548102c5c..8c4ed4fd51 100644 --- a/pl_bolts/models/self_supervised/moco/__init__.py +++ b/pl_bolts/models/self_supervised/moco/__init__.py @@ -1,4 +1,4 @@ -from pl_bolts.models.self_supervised.moco.transforms import ( # noqa: F401 +from pl_bolts.transforms.self_supervised.moco_transforms import ( # noqa: F401 Moco2EvalCIFAR10Transforms, Moco2EvalImagenetTransforms, Moco2EvalSTL10Transforms, diff --git a/pl_bolts/models/self_supervised/moco/moco2_module.py b/pl_bolts/models/self_supervised/moco/moco2_module.py index e9b4f4cd62..61dbdf0a51 100644 --- 
a/pl_bolts/models/self_supervised/moco/moco2_module.py +++ b/pl_bolts/models/self_supervised/moco/moco2_module.py @@ -18,7 +18,7 @@ from torch.nn import functional as F from pl_bolts.metrics import mean, precision_at_k -from pl_bolts.models.self_supervised.moco.transforms import ( +from pl_bolts.transforms.self_supervised.moco_transforms import ( Moco2EvalCIFAR10Transforms, Moco2EvalImagenetTransforms, Moco2EvalSTL10Transforms, diff --git a/pl_bolts/models/self_supervised/simclr/__init__.py b/pl_bolts/models/self_supervised/simclr/__init__.py index ef4ba84392..c19c31beff 100644 --- a/pl_bolts/models/self_supervised/simclr/__init__.py +++ b/pl_bolts/models/self_supervised/simclr/__init__.py @@ -1,4 +1,4 @@ -from pl_bolts.models.self_supervised.simclr.transforms import ( # noqa: F401 +from pl_bolts.transforms.self_supervised.simclr_transforms import ( # noqa: F401 SimCLREvalDataTransform, SimCLRTrainDataTransform, ) diff --git a/pl_bolts/models/self_supervised/simclr/simclr_finetuner.py b/pl_bolts/models/self_supervised/simclr/simclr_finetuner.py index f93a0ce9e9..6b6115046f 100644 --- a/pl_bolts/models/self_supervised/simclr/simclr_finetuner.py +++ b/pl_bolts/models/self_supervised/simclr/simclr_finetuner.py @@ -4,13 +4,13 @@ from pytorch_lightning import Trainer, seed_everything from pl_bolts.models.self_supervised.simclr.simclr_module import SimCLR -from pl_bolts.models.self_supervised.simclr.transforms import SimCLRFinetuneTransform from pl_bolts.models.self_supervised.ssl_finetuner import SSLFineTuner from pl_bolts.transforms.dataset_normalizations import ( cifar10_normalization, imagenet_normalization, stl10_normalization, ) +from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLRFinetuneTransform from pl_bolts.utils.stability import under_review diff --git a/pl_bolts/models/self_supervised/simclr/simclr_module.py b/pl_bolts/models/self_supervised/simclr/simclr_module.py index 077aa92870..0dca502091 100644 --- 
a/pl_bolts/models/self_supervised/simclr/simclr_module.py +++ b/pl_bolts/models/self_supervised/simclr/simclr_module.py @@ -308,7 +308,7 @@ def add_model_specific_args(parent_parser): def cli_main(): from pl_bolts.callbacks.ssl_online import SSLOnlineEvaluator from pl_bolts.datamodules import CIFAR10DataModule, ImagenetDataModule, STL10DataModule - from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform + from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform parser = ArgumentParser() diff --git a/pl_bolts/models/self_supervised/swav/__init__.py b/pl_bolts/models/self_supervised/swav/__init__.py index ddddff1890..34eed30e50 100644 --- a/pl_bolts/models/self_supervised/swav/__init__.py +++ b/pl_bolts/models/self_supervised/swav/__init__.py @@ -1,7 +1,7 @@ from pl_bolts.models.self_supervised.swav.loss import SWAVLoss from pl_bolts.models.self_supervised.swav.swav_module import SwAV from pl_bolts.models.self_supervised.swav.swav_resnet import resnet18, resnet50 -from pl_bolts.models.self_supervised.swav.transforms import ( +from pl_bolts.transforms.self_supervised.swav_transforms import ( SwAVEvalDataTransform, SwAVFinetuneTransform, SwAVTrainDataTransform, diff --git a/pl_bolts/models/self_supervised/swav/swav_finetuner.py b/pl_bolts/models/self_supervised/swav/swav_finetuner.py index 4754846bff..1e85da593d 100644 --- a/pl_bolts/models/self_supervised/swav/swav_finetuner.py +++ b/pl_bolts/models/self_supervised/swav/swav_finetuner.py @@ -5,8 +5,8 @@ from pl_bolts.models.self_supervised.ssl_finetuner import SSLFineTuner from pl_bolts.models.self_supervised.swav.swav_module import SwAV -from pl_bolts.models.self_supervised.swav.transforms import SwAVFinetuneTransform from pl_bolts.transforms.dataset_normalizations import imagenet_normalization, stl10_normalization +from pl_bolts.transforms.self_supervised.swav_transforms import SwAVFinetuneTransform def 
cli_main(): # pragma: no cover diff --git a/pl_bolts/models/self_supervised/swav/swav_module.py b/pl_bolts/models/self_supervised/swav/swav_module.py index c253a08431..cda6480212 100644 --- a/pl_bolts/models/self_supervised/swav/swav_module.py +++ b/pl_bolts/models/self_supervised/swav/swav_module.py @@ -382,7 +382,7 @@ def add_model_specific_args(parent_parser): def cli_main(): from pl_bolts.callbacks.ssl_online import SSLOnlineEvaluator from pl_bolts.datamodules import CIFAR10DataModule, ImagenetDataModule, STL10DataModule - from pl_bolts.models.self_supervised.swav.transforms import SwAVEvalDataTransform, SwAVTrainDataTransform + from pl_bolts.transforms.self_supervised.swav_transforms import SwAVEvalDataTransform, SwAVTrainDataTransform parser = ArgumentParser() diff --git a/pl_bolts/models/self_supervised/amdim/transforms.py b/pl_bolts/transforms/self_supervised/amdim_transforms.py similarity index 100% rename from pl_bolts/models/self_supervised/amdim/transforms.py rename to pl_bolts/transforms/self_supervised/amdim_transforms.py diff --git a/pl_bolts/models/self_supervised/cpc/transforms.py b/pl_bolts/transforms/self_supervised/cpc_transforms.py similarity index 100% rename from pl_bolts/models/self_supervised/cpc/transforms.py rename to pl_bolts/transforms/self_supervised/cpc_transforms.py diff --git a/pl_bolts/models/self_supervised/moco/transforms.py b/pl_bolts/transforms/self_supervised/moco_transforms.py similarity index 100% rename from pl_bolts/models/self_supervised/moco/transforms.py rename to pl_bolts/transforms/self_supervised/moco_transforms.py diff --git a/pl_bolts/models/self_supervised/simclr/transforms.py b/pl_bolts/transforms/self_supervised/simclr_transforms.py similarity index 100% rename from pl_bolts/models/self_supervised/simclr/transforms.py rename to pl_bolts/transforms/self_supervised/simclr_transforms.py diff --git a/pl_bolts/models/self_supervised/swav/transforms.py b/pl_bolts/transforms/self_supervised/swav_transforms.py 
similarity index 100% rename from pl_bolts/models/self_supervised/swav/transforms.py rename to pl_bolts/transforms/self_supervised/swav_transforms.py diff --git a/tests/models/self_supervised/test_models.py b/tests/models/self_supervised/test_models.py index 9db7ad411c..fe71e73863 100644 --- a/tests/models/self_supervised/test_models.py +++ b/tests/models/self_supervised/test_models.py @@ -9,10 +9,10 @@ from pl_bolts.models.self_supervised import AMDIM, BYOL, CPC_v2, Moco_v2, SimCLR, SimSiam, SwAV from pl_bolts.models.self_supervised.cpc import CPCEvalTransformsCIFAR10, CPCTrainTransformsCIFAR10 from pl_bolts.models.self_supervised.moco.callbacks import MocoLRScheduler -from pl_bolts.models.self_supervised.moco.transforms import Moco2EvalCIFAR10Transforms, Moco2TrainCIFAR10Transforms -from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform -from pl_bolts.models.self_supervised.swav.transforms import SwAVEvalDataTransform, SwAVTrainDataTransform from pl_bolts.transforms.dataset_normalizations import cifar10_normalization +from pl_bolts.transforms.self_supervised.moco_transforms import Moco2EvalCIFAR10Transforms, Moco2TrainCIFAR10Transforms +from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform +from pl_bolts.transforms.self_supervised.swav_transforms import SwAVEvalDataTransform, SwAVTrainDataTransform from tests import _MARK_REQUIRE_GPU diff --git a/tests/models/self_supervised/unit/test_transforms.py b/tests/models/self_supervised/unit/test_transforms.py index eaf97ab3ee..b3cd189953 100644 --- a/tests/models/self_supervised/unit/test_transforms.py +++ b/tests/models/self_supervised/unit/test_transforms.py @@ -3,7 +3,7 @@ import torch from PIL import Image -from pl_bolts.models.self_supervised.simclr.transforms import ( +from pl_bolts.transforms.self_supervised.simclr_transforms import ( SimCLREvalDataTransform, SimCLRFinetuneTransform, 
SimCLRTrainDataTransform, diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py index 978e0d562e..2f4f493f77 100644 --- a/tests/transforms/test_transforms.py +++ b/tests/transforms/test_transforms.py @@ -11,7 +11,7 @@ "You want to use `torchvision` which is not installed yet, install it with `pip install torchvision`." ) -from pl_bolts.models.self_supervised.amdim.transforms import ( +from pl_bolts.transforms.self_supervised.amdim_transforms import ( AMDIMEvalTransformsCIFAR10, AMDIMEvalTransformsImageNet128, AMDIMEvalTransformsSTL10, @@ -19,7 +19,7 @@ AMDIMTrainTransformsImageNet128, AMDIMTrainTransformsSTL10, ) -from pl_bolts.models.self_supervised.cpc.transforms import ( +from pl_bolts.transforms.self_supervised.cpc_transforms import ( CPCEvalTransformsCIFAR10, CPCEvalTransformsImageNet128, CPCEvalTransformsSTL10, @@ -27,7 +27,7 @@ CPCTrainTransformsImageNet128, CPCTrainTransformsSTL10, ) -from pl_bolts.models.self_supervised.moco.transforms import ( +from pl_bolts.transforms.self_supervised.moco_transforms import ( Moco2EvalCIFAR10Transforms, Moco2EvalImagenetTransforms, Moco2EvalSTL10Transforms, @@ -35,7 +35,7 @@ Moco2TrainImagenetTransforms, Moco2TrainSTL10Transforms, ) -from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform +from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform @pytest.mark.parametrize( From 2aeeb32fb64c476ffe32f33749c85d8bfecd209b Mon Sep 17 00:00:00 2001 From: Shion Date: Tue, 11 Oct 2022 14:36:27 -0400 Subject: [PATCH 02/19] Update self supervised docs --- docs/source/models/self_supervised.rst | 19 +++++++++++++------ docs/source/transforms/self_supervised.rst | 6 +++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/docs/source/models/self_supervised.rst b/docs/source/models/self_supervised.rst index 27054b271d..07b9385a2f 100644 --- 
a/docs/source/models/self_supervised.rst +++ b/docs/source/models/self_supervised.rst @@ -52,8 +52,10 @@ These models are perfect for training from scratch when you have a huge set of u .. code-block:: python from pl_bolts.models.self_supervised import SimCLR - from pl_bolts.models.self_supervised.simclr import SimCLREvalDataTransform, SimCLRTrainDataTransform - + from pl_bolts.transforms.self_supervised.simclr_transforms import ( + SimCLREvalDataTransform, + SimCLRTrainDataTransform + ) train_dataset = MyDataset(transforms=SimCLRTrainDataTransform()) val_dataset = MyDataset(transforms=SimCLREvalDataTransform()) @@ -120,8 +122,10 @@ To Train:: import pytorch_lightning as pl from pl_bolts.models.self_supervised import CPC_v2 from pl_bolts.datamodules import CIFAR10DataModule - from pl_bolts.models.self_supervised.cpc import ( - CPCTrainTransformsCIFAR10, CPCEvalTransformsCIFAR10) + from pl_bolts.transforms.self_supervised.cpc_transforms import ( + CPCTrainTransformsCIFAR10, + CPCEvalTransformsCIFAR10 + ) # data dm = CIFAR10DataModule(num_workers=0) @@ -277,7 +281,9 @@ To Train:: from pl_bolts.models.self_supervised import SimCLR from pl_bolts.datamodules import CIFAR10DataModule from pl_bolts.models.self_supervised.simclr.transforms import ( - SimCLREvalDataTransform, SimCLRTrainDataTransform) + SimCLREvalDataTransform, + SimCLRTrainDataTransform + ) # data dm = CIFAR10DataModule(num_workers=0) @@ -466,7 +472,8 @@ To Train:: from pl_bolts.models.self_supervised import SwAV from pl_bolts.datamodules import STL10DataModule from pl_bolts.models.self_supervised.swav.transforms import ( - SwAVTrainDataTransform, SwAVEvalDataTransform + SwAVTrainDataTransform, + SwAVEvalDataTransform ) from pl_bolts.transforms.dataset_normalizations import stl10_normalization diff --git a/docs/source/transforms/self_supervised.rst b/docs/source/transforms/self_supervised.rst index abe7ee36e1..a7c680cf3e 100644 --- a/docs/source/transforms/self_supervised.rst +++ 
b/docs/source/transforms/self_supervised.rst @@ -20,17 +20,17 @@ Transforms used for CPC CIFAR-10 Train (c) ^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCTrainTransformsCIFAR10 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCTrainTransformsCIFAR10 :noindex: CIFAR-10 Eval (c) ^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCEvalTransformsCIFAR10 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCEvalTransformsCIFAR10 :noindex: Imagenet Train (c) ^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCTrainTransformsImageNet128 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCTrainTransformsImageNet128 :noindex: Imagenet Eval (c) From 2e9d2fd2306ef4ed13501c56c83200cba4f5996a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 27 Oct 2022 03:20:36 +0000 Subject: [PATCH 03/19] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/models/self_supervised/unit/test_transforms.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/models/self_supervised/unit/test_transforms.py b/tests/models/self_supervised/unit/test_transforms.py index b3cd189953..aff41edfc2 100644 --- a/tests/models/self_supervised/unit/test_transforms.py +++ b/tests/models/self_supervised/unit/test_transforms.py @@ -3,16 +3,16 @@ import torch from PIL import Image -from pl_bolts.transforms.self_supervised.simclr_transforms import ( - SimCLREvalDataTransform, - SimCLRFinetuneTransform, - SimCLRTrainDataTransform, -) from pl_bolts.models.self_supervised.swav.transforms import ( SwAVEvalDataTransform, SwAVFinetuneTransform, SwAVTrainDataTransform, ) +from pl_bolts.transforms.self_supervised.simclr_transforms import ( + SimCLREvalDataTransform, + SimCLRFinetuneTransform, + 
SimCLRTrainDataTransform, +) @pytest.mark.parametrize( From 026325a24271c006133b12c15ef536e6c24977c2 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 28 Oct 2022 10:17:42 -0400 Subject: [PATCH 04/19] Update self-supervised transforms docs. --- docs/source/transforms/self_supervised.rst | 34 +++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/source/transforms/self_supervised.rst b/docs/source/transforms/self_supervised.rst index a7c680cf3e..33e19cd83c 100644 --- a/docs/source/transforms/self_supervised.rst +++ b/docs/source/transforms/self_supervised.rst @@ -35,17 +35,17 @@ Imagenet Train (c) Imagenet Eval (c) ^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCEvalTransformsImageNet128 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCEvalTransformsImageNet128 :noindex: STL-10 Train (c) ^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCTrainTransformsSTL10 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCTrainTransformsSTL10 :noindex: STL-10 Eval (c) ^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.cpc.transforms.CPCEvalTransformsSTL10 +.. autoclass:: pl_bolts.transforms.self_supervised.cpc_transforms.CPCEvalTransformsSTL10 :noindex: AMDIM transforms @@ -56,32 +56,32 @@ Transforms used for AMDIM CIFAR-10 Train (a) ^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMTrainTransformsCIFAR10 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMTrainTransformsCIFAR10 :noindex: CIFAR-10 Eval (a) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMEvalTransformsCIFAR10 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMEvalTransformsCIFAR10 :noindex: Imagenet Train (a) ^^^^^^^^^^^^^^^^^^^^^^ -.. 
autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMTrainTransformsImageNet128 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMTrainTransformsImageNet128 :noindex: Imagenet Eval (a) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMEvalTransformsImageNet128 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMEvalTransformsImageNet128 :noindex: STL-10 Train (a) ^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMTrainTransformsSTL10 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMTrainTransformsSTL10 :noindex: STL-10 Eval (a) ^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.amdim.transforms.AMDIMEvalTransformsSTL10 +.. autoclass:: pl_bolts.transforms.self_supervised.amdim_transforms.AMDIMEvalTransformsSTL10 :noindex: MOCO V2 transforms @@ -92,32 +92,32 @@ Transforms used for MOCO V2 CIFAR-10 Train (m2) ^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2TrainCIFAR10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainCIFAR10Transforms :noindex: CIFAR-10 Eval (m2) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2EvalCIFAR10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalCIFAR10Transforms :noindex: Imagenet Train (m2) ^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2TrainSTL10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainSTL10Transforms :noindex: Imagenet Eval (m2) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2EvalSTL10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalSTL10Transforms :noindex: STL-10 Train (m2) ^^^^^^^^^^^^^^^^^^^^ -.. 
autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2TrainImagenetTransforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainImagenetTransforms :noindex: STL-10 Eval (m2) ^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.moco.transforms.Moco2EvalImagenetTransforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalImagenetTransforms :noindex: SimCLR transforms @@ -126,12 +126,12 @@ Transforms used for SimCLR Train (sc) ^^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.simclr.transforms.SimCLRTrainDataTransform +.. autoclass:: pl_bolts.transforms.self_supervised.simclr_transforms.SimCLRTrainDataTransform :noindex: Eval (sc) ^^^^^^^^^ -.. autoclass:: pl_bolts.models.self_supervised.simclr.transforms.SimCLREvalDataTransform +.. autoclass:: pl_bolts.transforms.self_supervised.simclr_transforms.SimCLREvalDataTransform :noindex: From ec0868f75d374f601a5f8276d7ecb9abc3fbe8b7 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 28 Oct 2022 10:52:28 -0400 Subject: [PATCH 05/19] Add simclr transforms doc strings and typing --- .../self_supervised/simclr_transforms.py | 61 ++++++++++++++----- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/simclr_transforms.py b/pl_bolts/transforms/self_supervised/simclr_transforms.py index 37eccfd6c6..f225f0123f 100644 --- a/pl_bolts/transforms/self_supervised/simclr_transforms.py +++ b/pl_bolts/transforms/self_supervised/simclr_transforms.py @@ -1,3 +1,7 @@ +from typing import Callable, Tuple, Union + +from torch import Tensor + from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.warnings import warn_missing_pkg @@ -10,6 +14,12 @@ class SimCLRTrainDataTransform: """Transforms for SimCLR during training step of the pre-training stage. + Args: + input_height (int, optional): expected output size of image. Defaults to 224. + gaussian_blur (bool, optional): applies Gaussian blur if True. 
Defaults to True. + jitter_strength (float, optional): color jitter multiplier. Defaults to 1.0. + normalize (Callable, optional): optional transform to normalize. Defaults to None. + Transform:: RandomResizedCrop(size=self.input_height) @@ -21,7 +31,7 @@ class SimCLRTrainDataTransform: Example:: - from pl_bolts.models.self_supervised.simclr.transforms import SimCLRTrainDataTransform + from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLRTrainDataTransform transform = SimCLRTrainDataTransform(input_height=32) x = sample() @@ -29,7 +39,11 @@ class SimCLRTrainDataTransform: """ def __init__( - self, input_height: int = 224, gaussian_blur: bool = True, jitter_strength: float = 1.0, normalize=None + self, + input_height: int = 224, + gaussian_blur: bool = True, + jitter_strength: float = 1.0, + normalize: Union[None, Callable] = None, ) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover @@ -75,18 +89,21 @@ def __init__( [transforms.RandomResizedCrop(self.input_height), transforms.RandomHorizontalFlip(), self.final_transform] ) - def __call__(self, sample): - transform = self.train_transform - - xi = transform(sample) - xj = transform(sample) - + def __call__(self, sample: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + xi = self.transform(sample) + xj = self.transform(sample) return xi, xj, self.online_transform(sample) class SimCLREvalDataTransform(SimCLRTrainDataTransform): """Transforms for SimCLR during the validation step of the pre-training stage. + Args: + input_height (int, optional): expected output size of image. Defaults to 224. + gaussian_blur (bool, optional): applies Gaussian blur if True. Defaults to True. + jitter_strength (float, optional): color jitter multiplier. Defaults to 1.0. + normalize (Callable, optional): optional transform to normalize. Defaults to None. 
+ Transform:: Resize(input_height + 10, interpolation=3) @@ -95,7 +112,7 @@ class SimCLREvalDataTransform(SimCLRTrainDataTransform): Example:: - from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform + from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform transform = SimCLREvalDataTransform(input_height=32) x = sample() @@ -103,8 +120,13 @@ class SimCLREvalDataTransform(SimCLRTrainDataTransform): """ def __init__( - self, input_height: int = 224, gaussian_blur: bool = True, jitter_strength: float = 1.0, normalize=None - ): + self, + input_height: int = 224, + gaussian_blur: bool = True, + jitter_strength: float = 1.0, + normalize: Union[None, Callable] = None, + ) -> None: + super().__init__( normalize=normalize, input_height=input_height, gaussian_blur=gaussian_blur, jitter_strength=jitter_strength ) @@ -122,6 +144,13 @@ def __init__( class SimCLRFinetuneTransform(SimCLRTrainDataTransform): """Transforms for SimCLR during the fine-tuning stage. + Args: + input_height (int, optional): expected output size of image. Defaults to 224. + jitter_strength (float, optional): color jitter multiplier. Defaults to 1.0. + normalize (Callable, optional): optional transform to normalize. Defaults to None. + eval_transform (bool, optional): if True, uses validation transforms. + Otherwise uses training transforms. Defaults to False. 
+ Transform:: Resize(input_height + 10, interpolation=3) @@ -130,7 +159,7 @@ class SimCLRFinetuneTransform(SimCLRTrainDataTransform): Example:: - from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform + from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform transform = SimCLREvalDataTransform(input_height=32) x = sample() @@ -138,7 +167,11 @@ class SimCLRFinetuneTransform(SimCLRTrainDataTransform): """ def __init__( - self, input_height: int = 224, jitter_strength: float = 1.0, normalize=None, eval_transform: bool = False + self, + input_height: int = 224, + jitter_strength: float = 1.0, + normalize: Union[None, Callable] = None, + eval_transform: bool = False, ) -> None: super().__init__( @@ -153,5 +186,5 @@ def __init__( self.transform = transforms.Compose([self.data_transforms, self.final_transform]) - def __call__(self, sample): + def __call__(self, sample: Tensor) -> Tensor: return self.transform(sample) From e015d8f4d376101796e197781d667eafad869b9b Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 28 Oct 2022 11:09:13 -0400 Subject: [PATCH 06/19] Fix swav_transform import error. Change assert to assertion error. 
--- pl_bolts/models/self_supervised/amdim/datasets.py | 2 +- pl_bolts/transforms/self_supervised/swav_transforms.py | 9 ++++++--- tests/models/self_supervised/unit/test_transforms.py | 10 +++++----- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/pl_bolts/models/self_supervised/amdim/datasets.py b/pl_bolts/models/self_supervised/amdim/datasets.py index bdb87e2e3a..fcbd802a29 100644 --- a/pl_bolts/models/self_supervised/amdim/datasets.py +++ b/pl_bolts/models/self_supervised/amdim/datasets.py @@ -3,7 +3,7 @@ from torch.utils.data import random_split from pl_bolts.datasets import CIFAR10Mixed, UnlabeledImagenet -from pl_bolts.transforms.self_supervised import amdim_transforms as amdim_transforms +from pl_bolts.transforms.self_supervised import amdim_transforms from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg diff --git a/pl_bolts/transforms/self_supervised/swav_transforms.py b/pl_bolts/transforms/self_supervised/swav_transforms.py index 2047563a50..a01ee7d743 100644 --- a/pl_bolts/transforms/self_supervised/swav_transforms.py +++ b/pl_bolts/transforms/self_supervised/swav_transforms.py @@ -23,9 +23,12 @@ def __init__( self.jitter_strength = jitter_strength self.gaussian_blur = gaussian_blur - assert len(size_crops) == len(nmb_crops) - assert len(min_scale_crops) == len(nmb_crops) - assert len(max_scale_crops) == len(nmb_crops) + if len(size_crops) != len(nmb_crops): + raise AssertionError("len(size_crops) should equal len(nmb_crops).") + if len(min_scale_crops) != len(nmb_crops): + raise AssertionError("len(min_scale_crops) should equal len(nmb_crops).") + if len(max_scale_crops) != len(nmb_crops): + raise AssertionError("len(max_scale_crops) should equal len(nmb_crops).") self.size_crops = size_crops self.nmb_crops = nmb_crops diff --git a/tests/models/self_supervised/unit/test_transforms.py b/tests/models/self_supervised/unit/test_transforms.py 
index aff41edfc2..ed3909c773 100644 --- a/tests/models/self_supervised/unit/test_transforms.py +++ b/tests/models/self_supervised/unit/test_transforms.py @@ -3,16 +3,16 @@ import torch from PIL import Image -from pl_bolts.models.self_supervised.swav.transforms import ( - SwAVEvalDataTransform, - SwAVFinetuneTransform, - SwAVTrainDataTransform, -) from pl_bolts.transforms.self_supervised.simclr_transforms import ( SimCLREvalDataTransform, SimCLRFinetuneTransform, SimCLRTrainDataTransform, ) +from pl_bolts.transforms.self_supervised.swav_transforms import ( + SwAVEvalDataTransform, + SwAVFinetuneTransform, + SwAVTrainDataTransform, +) @pytest.mark.parametrize( From a72080905127012dfce4080f79fae178a2a455a0 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 28 Oct 2022 13:24:34 -0400 Subject: [PATCH 07/19] call train_transform --- pl_bolts/transforms/self_supervised/simclr_transforms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/simclr_transforms.py b/pl_bolts/transforms/self_supervised/simclr_transforms.py index f225f0123f..c7de9c0c95 100644 --- a/pl_bolts/transforms/self_supervised/simclr_transforms.py +++ b/pl_bolts/transforms/self_supervised/simclr_transforms.py @@ -90,8 +90,8 @@ def __init__( ) def __call__(self, sample: Tensor) -> Tuple[Tensor, Tensor, Tensor]: - xi = self.transform(sample) - xj = self.transform(sample) + xi = self.train_transform(sample) + xj = self.train_transform(sample) return xi, xj, self.online_transform(sample) From 937581aef48e094ad0d6c8a85110c7423eb3ff31 Mon Sep 17 00:00:00 2001 From: Shion Date: Tue, 1 Nov 2022 16:00:03 -0400 Subject: [PATCH 08/19] Fix gaussian_blur super init arg --- pl_bolts/transforms/self_supervised/simclr_transforms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pl_bolts/transforms/self_supervised/simclr_transforms.py b/pl_bolts/transforms/self_supervised/simclr_transforms.py index c7de9c0c95..91a97f3408 100644 --- 
a/pl_bolts/transforms/self_supervised/simclr_transforms.py +++ b/pl_bolts/transforms/self_supervised/simclr_transforms.py @@ -175,7 +175,7 @@ def __init__( ) -> None: super().__init__( - normalize=normalize, input_height=input_height, gaussian_blur=None, jitter_strength=jitter_strength + input_height=input_height, gaussian_blur=False, jitter_strength=jitter_strength, normalize=normalize ) if eval_transform: From 421cd1a8b364599c228a02ad60474dd448dceaa0 Mon Sep 17 00:00:00 2001 From: Shion Date: Sun, 25 Dec 2022 10:35:51 -0500 Subject: [PATCH 09/19] Update moco transforms docs --- .../self_supervised/moco_transforms.py | 165 +++++++++++------- 1 file changed, 106 insertions(+), 59 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/moco_transforms.py b/pl_bolts/transforms/self_supervised/moco_transforms.py index c2a79cc65c..9738659a04 100644 --- a/pl_bolts/transforms/self_supervised/moco_transforms.py +++ b/pl_bolts/transforms/self_supervised/moco_transforms.py @@ -1,4 +1,7 @@ import random +from typing import Callable, Tuple, Union + +from torch import Tensor from pl_bolts.transforms.dataset_normalizations import ( cifar10_normalization, @@ -6,7 +9,6 @@ stl10_normalization, ) from pl_bolts.utils import _PIL_AVAILABLE, _TORCHVISION_AVAILABLE -from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -20,21 +22,72 @@ warn_missing_pkg("PIL", pypi_name="Pillow") -@under_review() -class Moco2TrainCIFAR10Transforms: - """Moco 2 augmentation: +class MoCoTrainTransforms: + + normalization: type + + """MoCo training transforms. 
+ + Args: + + Example:: + """ + + def __init__(self, size: int, normalize: Union[str, Callable]) -> None: + if isinstance(normalize, str): + self.normalize = normalize + else: + self.normalize = normalize + + self.train_transform = transforms.Compose( + [ + transforms.RandomResizedCrop(size, scale=(0.2, 1.0)), + transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), # not strengthened + transforms.RandomGrayscale(p=0.2), + transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize(), + ] + ) + + def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: + q = self.train_transform(x) + k = self.train_transform(x) + return q, k + + +class MoCo2TrainCIFAR10Transforms: + """MoCo v2 transforms. + + Args: + size (int, optional): input size. Defaults to 32. + + Transform:: + + RandomResizedCrop(size, scale=(0.2, 1.0)) + + Example:: + + from pl_bolts.transforms.self_supervised.moco_transforms import MoCo2TrainCIFAR10Transforms + + transform = MoCo2TrainCIFAR10Transforms(size=32) + x = sample() + (xi, xj) = transform(x) + + MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 32): + def __init__(self, size: int = 32) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") # image augmentation functions self.train_transform = transforms.Compose( [ - transforms.RandomResizedCrop(height, scale=(0.2, 1.0)), + transforms.RandomResizedCrop(size, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), # not strengthened transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5), @@ -44,151 +97,145 @@ def __init__(self, height: int = 32): ] ) - def __call__(self, inp): - q = self.train_transform(inp) - k = self.train_transform(inp) + def __call__(self, x: Tensor) 
-> Tuple[Tensor, Tensor]: + q = self.train_transform(x) + k = self.train_transform(x) return q, k -@under_review() -class Moco2EvalCIFAR10Transforms: - """Moco 2 augmentation: +class MoCo2EvalCIFAR10Transforms: + """MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 32): + def __init__(self, size: int = 32) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") self.test_transform = transforms.Compose( [ - transforms.Resize(height + 12), - transforms.CenterCrop(height), + transforms.Resize(size + 12), + transforms.CenterCrop(size), transforms.ToTensor(), cifar10_normalization(), ] ) - def __call__(self, inp): - q = self.test_transform(inp) - k = self.test_transform(inp) + def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: + q = self.test_transform(x) + k = self.test_transform(x) return q, k -@under_review() -class Moco2TrainSTL10Transforms: - """Moco 2 augmentation: +class MoCo2TrainImagenetTransforms: + """MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 64): + def __init__(self, size: int = 224): if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") # image augmentation functions self.train_transform = transforms.Compose( [ - transforms.RandomResizedCrop(height, scale=(0.2, 1.0)), + transforms.RandomResizedCrop(size, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), # not strengthened transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), - stl10_normalization(), + imagenet_normalization(), ] ) - def __call__(self, inp): - q = self.train_transform(inp) - k = self.train_transform(inp) + def __call__(self, x: Tensor) -> 
Tuple[Tensor, Tensor]: + q = self.train_transform(x) + k = self.train_transform(x) return q, k -@under_review() -class Moco2EvalSTL10Transforms: - """Moco 2 augmentation: +class MoCo2EvalImagenetTransforms: + """MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 64): + def __init__(self, size: int = 128): if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") - self.test_augmentation = transforms.Compose( + self.test_transform = transforms.Compose( [ - transforms.Resize(height + 11), - transforms.CenterCrop(height), + transforms.Resize(size + 32), + transforms.CenterCrop(size), transforms.ToTensor(), - stl10_normalization(), + imagenet_normalization(), ] ) - def __call__(self, inp): - q = self.test_augmentation(inp) - k = self.test_augmentation(inp) + def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: + q = self.test_transform(x) + k = self.test_transform(x) return q, k -@under_review() -class Moco2TrainImagenetTransforms: - """Moco 2 augmentation: +class MoCo2TrainSTL10Transforms: + """MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 128): + def __init__(self, size: int = 64): if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") # image augmentation functions self.train_transform = transforms.Compose( [ - transforms.RandomResizedCrop(height, scale=(0.2, 1.0)), + transforms.RandomResizedCrop(size, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), # not strengthened transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), - imagenet_normalization(), + stl10_normalization(), ] ) - def __call__(self, inp): - q = 
self.train_transform(inp) - k = self.train_transform(inp) + def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: + q = self.train_transform(x) + k = self.train_transform(x) return q, k -@under_review() -class Moco2EvalImagenetTransforms: - """Moco 2 augmentation: +class MoCo2EvalSTL10Transforms: + """MoCo 2 augmentation: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, height: int = 128): + def __init__(self, size: int = 64): if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") - self.test_transform = transforms.Compose( + self.test_augmentation = transforms.Compose( [ - transforms.Resize(height + 32), - transforms.CenterCrop(height), + transforms.Resize(size + 11), + transforms.CenterCrop(size), transforms.ToTensor(), - imagenet_normalization(), + stl10_normalization(), ] ) - def __call__(self, inp): - q = self.test_transform(inp) - k = self.test_transform(inp) + def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: + q = self.test_augmentation(x) + k = self.test_augmentation(x) return q, k -@under_review() class GaussianBlur: """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709.""" From a9719c15d55de5cd09f9ecfcf188c04d3296608b Mon Sep 17 00:00:00 2001 From: Shion Date: Thu, 5 Jan 2023 23:08:20 +0900 Subject: [PATCH 10/19] Transfer MoCo transforms. MoCo tests passing. 
--- .../models/self_supervised/moco/__init__.py | 12 +++++----- .../self_supervised/moco/moco2_module.py | 24 +++++++++---------- tests/transforms/test_transforms.py | 24 +++++++++---------- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/pl_bolts/models/self_supervised/moco/__init__.py b/pl_bolts/models/self_supervised/moco/__init__.py index 8c4ed4fd51..98527ec844 100644 --- a/pl_bolts/models/self_supervised/moco/__init__.py +++ b/pl_bolts/models/self_supervised/moco/__init__.py @@ -1,8 +1,8 @@ from pl_bolts.transforms.self_supervised.moco_transforms import ( # noqa: F401 - Moco2EvalCIFAR10Transforms, - Moco2EvalImagenetTransforms, - Moco2EvalSTL10Transforms, - Moco2TrainCIFAR10Transforms, - Moco2TrainImagenetTransforms, - Moco2TrainSTL10Transforms, + MoCo2EvalCIFAR10Transforms, + MoCo2EvalImagenetTransforms, + MoCo2EvalSTL10Transforms, + MoCo2TrainCIFAR10Transforms, + MoCo2TrainImagenetTransforms, + MoCo2TrainSTL10Transforms, ) diff --git a/pl_bolts/models/self_supervised/moco/moco2_module.py b/pl_bolts/models/self_supervised/moco/moco2_module.py index 083c6b2fd5..ed6f7885d8 100644 --- a/pl_bolts/models/self_supervised/moco/moco2_module.py +++ b/pl_bolts/models/self_supervised/moco/moco2_module.py @@ -19,12 +19,12 @@ from pl_bolts.metrics import mean, precision_at_k from pl_bolts.transforms.self_supervised.moco_transforms import ( - Moco2EvalCIFAR10Transforms, - Moco2EvalImagenetTransforms, - Moco2EvalSTL10Transforms, - Moco2TrainCIFAR10Transforms, - Moco2TrainImagenetTransforms, - Moco2TrainSTL10Transforms, + MoCo2EvalCIFAR10Transforms, + MoCo2EvalImagenetTransforms, + MoCo2EvalSTL10Transforms, + MoCo2TrainCIFAR10Transforms, + MoCo2TrainImagenetTransforms, + MoCo2TrainSTL10Transforms, ) from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.stability import under_review @@ -372,20 +372,20 @@ def cli_main(): if args.dataset == "cifar10": datamodule = CIFAR10DataModule.from_argparse_args(args) - datamodule.train_transforms = 
Moco2TrainCIFAR10Transforms() - datamodule.val_transforms = Moco2EvalCIFAR10Transforms() + datamodule.train_transforms = MoCo2TrainCIFAR10Transforms() + datamodule.val_transforms = MoCo2EvalCIFAR10Transforms() elif args.dataset == "stl10": datamodule = STL10DataModule.from_argparse_args(args) datamodule.train_dataloader = datamodule.train_dataloader_mixed datamodule.val_dataloader = datamodule.val_dataloader_mixed - datamodule.train_transforms = Moco2TrainSTL10Transforms() - datamodule.val_transforms = Moco2EvalSTL10Transforms() + datamodule.train_transforms = MoCo2TrainSTL10Transforms() + datamodule.val_transforms = MoCo2EvalSTL10Transforms() elif args.dataset == "imagenet2012": datamodule = SSLImagenetDataModule.from_argparse_args(args) - datamodule.train_transforms = Moco2TrainImagenetTransforms() - datamodule.val_transforms = Moco2EvalImagenetTransforms() + datamodule.train_transforms = MoCo2TrainImagenetTransforms() + datamodule.val_transforms = MoCo2EvalImagenetTransforms() else: # replace with your own dataset, otherwise CIFAR-10 will be used by default if `None` passed in diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py index 2f4f493f77..a2082a19a8 100644 --- a/tests/transforms/test_transforms.py +++ b/tests/transforms/test_transforms.py @@ -28,12 +28,12 @@ CPCTrainTransformsSTL10, ) from pl_bolts.transforms.self_supervised.moco_transforms import ( - Moco2EvalCIFAR10Transforms, - Moco2EvalImagenetTransforms, - Moco2EvalSTL10Transforms, - Moco2TrainCIFAR10Transforms, - Moco2TrainImagenetTransforms, - Moco2TrainSTL10Transforms, + MoCo2EvalCIFAR10Transforms, + MoCo2EvalImagenetTransforms, + MoCo2EvalSTL10Transforms, + MoCo2TrainCIFAR10Transforms, + MoCo2TrainImagenetTransforms, + MoCo2TrainSTL10Transforms, ) from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform @@ -67,8 +67,8 @@ def test_simclr_transforms(img_size): CPCEvalTransformsCIFAR10, 
AMDIMEvalTransformsCIFAR10, AMDIMTrainTransformsCIFAR10, - Moco2TrainCIFAR10Transforms, - Moco2EvalCIFAR10Transforms, + MoCo2TrainCIFAR10Transforms, + MoCo2EvalCIFAR10Transforms, ], ) def test_cifar10_transforms(transform): @@ -86,8 +86,8 @@ def test_cifar10_transforms(transform): CPCEvalTransformsSTL10, AMDIMTrainTransformsSTL10, AMDIMEvalTransformsSTL10, - Moco2TrainSTL10Transforms, - Moco2EvalSTL10Transforms, + MoCo2TrainSTL10Transforms, + MoCo2EvalSTL10Transforms, ], ) def test_stl10_transforms(transform): @@ -105,8 +105,8 @@ def test_stl10_transforms(transform): CPCEvalTransformsImageNet128, AMDIMTrainTransformsImageNet128, AMDIMEvalTransformsImageNet128, - Moco2TrainImagenetTransforms, - Moco2EvalImagenetTransforms, + MoCo2TrainImagenetTransforms, + MoCo2EvalImagenetTransforms, ], ) def test_imagenet_transforms(transform): From 3f2645413d48142ea1bfdc39d23adcc36e64cf09 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 6 Jan 2023 10:46:26 +0900 Subject: [PATCH 11/19] Fix interpolation mode deprecation warning --- .../self_supervised/amdim_transforms.py | 60 +++++++++++-------- tests/models/self_supervised/test_models.py | 6 +- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/amdim_transforms.py b/pl_bolts/transforms/self_supervised/amdim_transforms.py index 221c87668a..f2d80e8752 100644 --- a/pl_bolts/transforms/self_supervised/amdim_transforms.py +++ b/pl_bolts/transforms/self_supervised/amdim_transforms.py @@ -1,6 +1,10 @@ +from typing import Tuple + +from torch import Tensor +from torchvision.transforms import InterpolationMode + from pl_bolts.transforms.self_supervised import RandomTranslateWithReflect from pl_bolts.utils import _TORCHVISION_AVAILABLE -from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -9,7 +13,6 @@ warn_missing_pkg("torchvision") -@under_review() class AMDIMTrainTransformsCIFAR10: """Transforms applied to 
AMDIM. @@ -29,7 +32,7 @@ class AMDIMTrainTransformsCIFAR10: (view1, view2) = transform(x) """ - def __init__(self): + def __init__(self) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -47,14 +50,13 @@ def __init__(self): self.transforms = transforms.Compose([img_jitter, col_jitter, rnd_gray, transforms.ToTensor(), normalize]) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: inp = self.flip_lr(inp) out1 = self.transforms(inp) out2 = self.transforms(inp) return out1, out2 -@under_review() class AMDIMEvalTransformsCIFAR10: """Transforms applied to AMDIM. @@ -71,7 +73,7 @@ class AMDIMEvalTransformsCIFAR10: (view1, view2) = transform(x) """ - def __init__(self): + def __init__(self) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -85,13 +87,12 @@ def __init__(self): # transform for testing self.transforms = transforms.Compose([transforms.ToTensor(), normalize]) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 -@under_review() class AMDIMTrainTransformsSTL10: """Transforms applied to AMDIM. 
@@ -111,7 +112,7 @@ class AMDIMTrainTransformsSTL10: (view1, view2) = transform(x) """ - def __init__(self, height=64): + def __init__(self, height: int = 64) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -121,24 +122,25 @@ def __init__(self, height=64): # image augmentation functions col_jitter = transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.2)], p=0.8) rnd_gray = transforms.RandomGrayscale(p=0.25) - rand_crop = transforms.RandomResizedCrop(height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3) + rand_crop = transforms.RandomResizedCrop( + height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=InterpolationMode.BICUBIC + ) self.transforms = transforms.Compose([rand_crop, col_jitter, rnd_gray, transforms.ToTensor(), normalize]) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: inp = self.flip_lr(inp) out1 = self.transforms(inp) out2 = self.transforms(inp) return out1, out2 -@under_review() class AMDIMEvalTransformsSTL10: """Transforms applied to AMDIM. 
Transforms:: - transforms.Resize(height + 6, interpolation=3), + transforms.Resize(height + 6, interpolation=InterpolationMode.BICUBIC), transforms.CenterCrop(height), transforms.ToTensor(), normalize @@ -151,31 +153,32 @@ class AMDIMEvalTransformsSTL10: view1 = transform(x) """ - def __init__(self, height=64): + def __init__(self, height: int = 64) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") # flipping image along vertical axis self.flip_lr = transforms.RandomHorizontalFlip(p=0.5) normalize = transforms.Normalize(mean=(0.43, 0.42, 0.39), std=(0.27, 0.26, 0.27)) - transforms.RandomResizedCrop(height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3) + transforms.RandomResizedCrop( + height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=InterpolationMode.BICUBIC + ) self.transforms = transforms.Compose( [ - transforms.Resize(height + 6, interpolation=3), + transforms.Resize(height + 6, interpolation=InterpolationMode.BICUBIC), transforms.CenterCrop(height), transforms.ToTensor(), normalize, ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 -@under_review() class AMDIMTrainTransformsImageNet128: """Transforms applied to AMDIM. 
@@ -195,13 +198,15 @@ class AMDIMTrainTransformsImageNet128: (view1, view2) = transform(x) """ - def __init__(self, height=128): + def __init__(self, height: int = 128) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") # image augmentation functions self.flip_lr = transforms.RandomHorizontalFlip(p=0.5) - rand_crop = transforms.RandomResizedCrop(height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3) + rand_crop = transforms.RandomResizedCrop( + height, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=InterpolationMode.BICUBIC + ) col_jitter = transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8) rnd_gray = transforms.RandomGrayscale(p=0.25) post_transform = transforms.Compose( @@ -212,20 +217,19 @@ def __init__(self, height=128): ) self.transforms = transforms.Compose([rand_crop, col_jitter, rnd_gray, post_transform]) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: inp = self.flip_lr(inp) out1 = self.transforms(inp) out2 = self.transforms(inp) return out1, out2 -@under_review() class AMDIMEvalTransformsImageNet128: """Transforms applied to AMDIM. 
Transforms:: - transforms.Resize(height + 6, interpolation=3), + transforms.Resize(height + 6, interpolation=InterpolationMode.BICUBIC), transforms.CenterCrop(height), transforms.ToTensor(), normalize @@ -238,7 +242,7 @@ class AMDIMEvalTransformsImageNet128: view1 = transform(x) """ - def __init__(self, height=128): + def __init__(self, height: int = 128) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -251,10 +255,14 @@ def __init__(self, height=128): ] ) self.transforms = transforms.Compose( - [transforms.Resize(height + 18, interpolation=3), transforms.CenterCrop(height), post_transform] + [ + transforms.Resize(height + 18, interpolation=InterpolationMode.BICUBIC), + transforms.CenterCrop(height), + post_transform, + ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 diff --git a/tests/models/self_supervised/test_models.py b/tests/models/self_supervised/test_models.py index fe71e73863..2742923020 100644 --- a/tests/models/self_supervised/test_models.py +++ b/tests/models/self_supervised/test_models.py @@ -10,7 +10,7 @@ from pl_bolts.models.self_supervised.cpc import CPCEvalTransformsCIFAR10, CPCTrainTransformsCIFAR10 from pl_bolts.models.self_supervised.moco.callbacks import MocoLRScheduler from pl_bolts.transforms.dataset_normalizations import cifar10_normalization -from pl_bolts.transforms.self_supervised.moco_transforms import Moco2EvalCIFAR10Transforms, Moco2TrainCIFAR10Transforms +from pl_bolts.transforms.self_supervised.moco_transforms import MoCo2EvalCIFAR10Transforms, MoCo2TrainCIFAR10Transforms from pl_bolts.transforms.self_supervised.simclr_transforms import SimCLREvalDataTransform, SimCLRTrainDataTransform from pl_bolts.transforms.self_supervised.swav_transforms import SwAVEvalDataTransform, SwAVTrainDataTransform from tests import 
_MARK_REQUIRE_GPU @@ -68,8 +68,8 @@ def test_amdim(tmpdir, datadir): def test_moco(tmpdir, datadir): datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2) - datamodule.train_transforms = Moco2TrainCIFAR10Transforms() - datamodule.val_transforms = Moco2EvalCIFAR10Transforms() + datamodule.train_transforms = MoCo2TrainCIFAR10Transforms() + datamodule.val_transforms = MoCo2EvalCIFAR10Transforms() model = Moco_v2(data_dir=datadir, batch_size=2, online_ft=True) trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, callbacks=[MocoLRScheduler()]) From 7062702677ba3f5e673b845a6bc612e8be655502 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 6 Jan 2023 12:15:25 +0900 Subject: [PATCH 12/19] CPC_transforms typing and interpolation deprecation fix. --- .../self_supervised/cpc_transforms.py | 50 ++++++++++--------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/cpc_transforms.py b/pl_bolts/transforms/self_supervised/cpc_transforms.py index 796d169ab8..81c8c17e9f 100644 --- a/pl_bolts/transforms/self_supervised/cpc_transforms.py +++ b/pl_bolts/transforms/self_supervised/cpc_transforms.py @@ -1,6 +1,8 @@ +from torch import Tensor +from torchvision.transforms import InterpolationMode + from pl_bolts.transforms.self_supervised import Patchify, RandomTranslateWithReflect from pl_bolts.utils import _TORCHVISION_AVAILABLE -from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -9,7 +11,6 @@ warn_missing_pkg("torchvision") -@under_review() class CPCTrainTransformsCIFAR10: """Transforms used for CPC: @@ -33,7 +34,7 @@ class CPCTrainTransformsCIFAR10: train_loader = module.train_dataloader(batch_size=32, transforms=CPCTrainTransformsCIFAR10()) """ - def __init__(self, patch_size=8, overlap=4): + def __init__(self, patch_size: int = 8, overlap: int = 4) -> None: """ Args: patch_size: size of patches when cutting up the image into 
overlapping patches @@ -65,13 +66,12 @@ def __init__(self, patch_size=8, overlap=4): ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 -@under_review() class CPCEvalTransformsCIFAR10: """Transforms used for CPC: @@ -92,7 +92,7 @@ class CPCEvalTransformsCIFAR10: train_loader = module.train_dataloader(batch_size=32, transforms=CPCEvalTransformsCIFAR10()) """ - def __init__(self, patch_size: int = 8, overlap: int = 4): + def __init__(self, patch_size: int = 8, overlap: int = 4) -> None: """ Args: patch_size: size of patches when cutting up the image into overlapping patches @@ -119,12 +119,11 @@ def __init__(self, patch_size: int = 8, overlap: int = 4): ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: out1 = self.transforms(inp) return out1 -@under_review() class CPCTrainTransformsSTL10: """Transforms used for CPC: @@ -148,7 +147,7 @@ class CPCTrainTransformsSTL10: train_loader = module.train_dataloader(batch_size=32, transforms=CPCTrainTransformsSTL10()) """ - def __init__(self, patch_size: int = 16, overlap: int = 8): + def __init__(self, patch_size: int = 16, overlap: int = 8) -> None: """ Args: patch_size: size of patches when cutting up the image into overlapping patches @@ -166,7 +165,9 @@ def __init__(self, patch_size: int = 16, overlap: int = 8): # image augmentation functions col_jitter = transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.2)], p=0.8) rnd_gray = transforms.RandomGrayscale(p=0.25) - rand_crop = transforms.RandomResizedCrop(64, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3) + rand_crop = transforms.RandomResizedCrop( + 64, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=InterpolationMode.BICUBIC + ) self.transforms = transforms.Compose( [ @@ -179,13 +180,12 @@ def __init__(self, patch_size: int = 16, overlap: int = 8): ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = 
self.flip_lr(inp) out1 = self.transforms(inp) return out1 -@under_review() class CPCEvalTransformsSTL10: """Transforms used for CPC: @@ -206,7 +206,7 @@ class CPCEvalTransformsSTL10: train_loader = module.train_dataloader(batch_size=32, transforms=CPCEvalTransformsSTL10()) """ - def __init__(self, patch_size: int = 16, overlap: int = 8): + def __init__(self, patch_size: int = 16, overlap: int = 8) -> None: """ Args: patch_size: size of patches when cutting up the image into overlapping patches @@ -223,7 +223,7 @@ def __init__(self, patch_size: int = 16, overlap: int = 8): self.transforms = transforms.Compose( [ - transforms.Resize(70, interpolation=3), + transforms.Resize(70, interpolation=InterpolationMode.BICUBIC), transforms.CenterCrop(64), transforms.ToTensor(), normalize, @@ -231,12 +231,11 @@ def __init__(self, patch_size: int = 16, overlap: int = 8): ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: out1 = self.transforms(inp) return out1 -@under_review() class CPCTrainTransformsImageNet128: """Transforms used for CPC: @@ -257,7 +256,7 @@ class CPCTrainTransformsImageNet128: train_loader = module.train_dataloader(batch_size=32, transforms=CPCTrainTransformsImageNet128()) """ - def __init__(self, patch_size: int = 32, overlap: int = 16): + def __init__(self, patch_size: int = 32, overlap: int = 16) -> None: """ Args: patch_size: size of patches when cutting up the image into overlapping patches @@ -270,7 +269,9 @@ def __init__(self, patch_size: int = 32, overlap: int = 16): self.patch_size = patch_size self.overlap = overlap self.flip_lr = transforms.RandomHorizontalFlip(p=0.5) - rand_crop = transforms.RandomResizedCrop(128, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=3) + rand_crop = transforms.RandomResizedCrop( + 128, scale=(0.3, 1.0), ratio=(0.7, 1.4), interpolation=InterpolationMode.BICUBIC + ) col_jitter = transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8) rnd_gray = 
transforms.RandomGrayscale(p=0.25) @@ -284,13 +285,12 @@ def __init__(self, patch_size: int = 32, overlap: int = 16): self.transforms = transforms.Compose([rand_crop, col_jitter, rnd_gray, post_transform]) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 -@under_review() class CPCEvalTransformsImageNet128: """Transforms used for CPC: @@ -311,7 +311,7 @@ class CPCEvalTransformsImageNet128: train_loader = module.train_dataloader(batch_size=32, transforms=CPCEvalTransformsImageNet128()) """ - def __init__(self, patch_size: int = 32, overlap: int = 16): + def __init__(self, patch_size: int = 32, overlap: int = 16) -> None: """ Args: patch_size: size of patches when cutting up the image into overlapping patches @@ -332,10 +332,14 @@ def __init__(self, patch_size: int = 32, overlap: int = 16): ] ) self.transforms = transforms.Compose( - [transforms.Resize(146, interpolation=3), transforms.CenterCrop(128), post_transform] + [ + transforms.Resize(146, interpolation=InterpolationMode.BICUBIC), + transforms.CenterCrop(128), + post_transform, + ] ) - def __call__(self, inp): + def __call__(self, inp: Tensor) -> Tensor: inp = self.flip_lr(inp) out1 = self.transforms(inp) return out1 From cb045e0c9096600c2ef75abde36b933f11ae6dc5 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 6 Jan 2023 16:16:38 +0900 Subject: [PATCH 13/19] MoCo transforms typing hints --- .../transforms/self_supervised/moco_transforms.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/moco_transforms.py b/pl_bolts/transforms/self_supervised/moco_transforms.py index 9738659a04..86808c3c49 100644 --- a/pl_bolts/transforms/self_supervised/moco_transforms.py +++ b/pl_bolts/transforms/self_supervised/moco_transforms.py @@ -1,5 +1,5 @@ import random -from typing import Callable, Tuple, Union +from typing import Callable, List, Tuple, Union from torch import Tensor 
@@ -134,7 +134,7 @@ class MoCo2TrainImagenetTransforms: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, size: int = 224): + def __init__(self, size: int = 224) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -163,7 +163,7 @@ class MoCo2EvalImagenetTransforms: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, size: int = 128): + def __init__(self, size: int = 128) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -188,7 +188,7 @@ class MoCo2TrainSTL10Transforms: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, size: int = 64): + def __init__(self, size: int = 64) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -217,7 +217,7 @@ class MoCo2EvalSTL10Transforms: https://arxiv.org/pdf/2003.04297.pdf """ - def __init__(self, size: int = 64): + def __init__(self, size: int = 64) -> None: if not _TORCHVISION_AVAILABLE: # pragma: no cover raise ModuleNotFoundError("You want to use `transforms` from `torchvision` which is not installed yet.") @@ -239,7 +239,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: class GaussianBlur: """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709.""" - def __init__(self, sigma=(0.1, 2.0)): + def __init__(self, sigma: List[float] = [0.1, 2.0]) -> None: if not _PIL_AVAILABLE: # pragma: no cover raise ModuleNotFoundError( "You want to use `Pillow` which is not installed yet, install it with `pip install Pillow`." 
From b0fbdf8890946a2779cbc2da864a970d6b4e6064 Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 6 Jan 2023 17:16:23 +0900 Subject: [PATCH 14/19] Keep under_review tags for unreviewed files --- .../transforms/self_supervised/amdim_transforms.py | 7 +++++++ .../transforms/self_supervised/cpc_transforms.py | 7 +++++++ .../transforms/self_supervised/moco_transforms.py | 9 +++++++++ .../transforms/self_supervised/swav_transforms.py | 14 +++++++++----- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/pl_bolts/transforms/self_supervised/amdim_transforms.py b/pl_bolts/transforms/self_supervised/amdim_transforms.py index f2d80e8752..4d6af99be2 100644 --- a/pl_bolts/transforms/self_supervised/amdim_transforms.py +++ b/pl_bolts/transforms/self_supervised/amdim_transforms.py @@ -5,6 +5,7 @@ from pl_bolts.transforms.self_supervised import RandomTranslateWithReflect from pl_bolts.utils import _TORCHVISION_AVAILABLE +from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -13,6 +14,7 @@ warn_missing_pkg("torchvision") +@under_review() class AMDIMTrainTransformsCIFAR10: """Transforms applied to AMDIM. @@ -57,6 +59,7 @@ def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: return out1, out2 +@under_review() class AMDIMEvalTransformsCIFAR10: """Transforms applied to AMDIM. @@ -93,6 +96,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class AMDIMTrainTransformsSTL10: """Transforms applied to AMDIM. @@ -135,6 +139,7 @@ def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: return out1, out2 +@under_review() class AMDIMEvalTransformsSTL10: """Transforms applied to AMDIM. @@ -179,6 +184,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class AMDIMTrainTransformsImageNet128: """Transforms applied to AMDIM. 
@@ -224,6 +230,7 @@ def __call__(self, inp: Tensor) -> Tuple[Tensor, Tensor]: return out1, out2 +@under_review() class AMDIMEvalTransformsImageNet128: """Transforms applied to AMDIM. diff --git a/pl_bolts/transforms/self_supervised/cpc_transforms.py b/pl_bolts/transforms/self_supervised/cpc_transforms.py index 81c8c17e9f..438da5e587 100644 --- a/pl_bolts/transforms/self_supervised/cpc_transforms.py +++ b/pl_bolts/transforms/self_supervised/cpc_transforms.py @@ -3,6 +3,7 @@ from pl_bolts.transforms.self_supervised import Patchify, RandomTranslateWithReflect from pl_bolts.utils import _TORCHVISION_AVAILABLE +from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -11,6 +12,7 @@ warn_missing_pkg("torchvision") +@under_review() class CPCTrainTransformsCIFAR10: """Transforms used for CPC: @@ -72,6 +74,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class CPCEvalTransformsCIFAR10: """Transforms used for CPC: @@ -124,6 +127,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class CPCTrainTransformsSTL10: """Transforms used for CPC: @@ -186,6 +190,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class CPCEvalTransformsSTL10: """Transforms used for CPC: @@ -236,6 +241,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class CPCTrainTransformsImageNet128: """Transforms used for CPC: @@ -291,6 +297,7 @@ def __call__(self, inp: Tensor) -> Tensor: return out1 +@under_review() class CPCEvalTransformsImageNet128: """Transforms used for CPC: diff --git a/pl_bolts/transforms/self_supervised/moco_transforms.py b/pl_bolts/transforms/self_supervised/moco_transforms.py index 86808c3c49..4f4f116dec 100644 --- a/pl_bolts/transforms/self_supervised/moco_transforms.py +++ b/pl_bolts/transforms/self_supervised/moco_transforms.py @@ -9,6 +9,7 @@ stl10_normalization, ) from pl_bolts.utils import 
_PIL_AVAILABLE, _TORCHVISION_AVAILABLE +from pl_bolts.utils.stability import under_review from pl_bolts.utils.warnings import warn_missing_pkg if _TORCHVISION_AVAILABLE: @@ -22,6 +23,7 @@ warn_missing_pkg("PIL", pypi_name="Pillow") +@under_review() class MoCoTrainTransforms: normalization: type @@ -57,6 +59,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2TrainCIFAR10Transforms: """MoCo v2 transforms. @@ -103,6 +106,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2EvalCIFAR10Transforms: """MoCo 2 augmentation: @@ -128,6 +132,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2TrainImagenetTransforms: """MoCo 2 augmentation: @@ -157,6 +162,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2EvalImagenetTransforms: """Transforms for MoCo during training step. @@ -182,6 +188,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2TrainSTL10Transforms: """MoCo 2 augmentation: @@ -211,6 +218,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class MoCo2EvalSTL10Transforms: """MoCo 2 augmentation: @@ -236,6 +244,7 @@ def __call__(self, x: Tensor) -> Tuple[Tensor, Tensor]: return q, k +@under_review() class GaussianBlur: """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709.""" diff --git a/pl_bolts/transforms/self_supervised/swav_transforms.py b/pl_bolts/transforms/self_supervised/swav_transforms.py index a01ee7d743..ad57695c21 100644 --- a/pl_bolts/transforms/self_supervised/swav_transforms.py +++ b/pl_bolts/transforms/self_supervised/swav_transforms.py @@ -1,4 +1,6 @@ -from typing import Tuple +from typing import List, Tuple + +from torch import Tensor from pl_bolts.utils import _TORCHVISION_AVAILABLE from pl_bolts.utils.warnings import warn_missing_pkg @@ -19,7 +21,8 
@@ def __init__( max_scale_crops: Tuple[float] = (1, 0.33), gaussian_blur: bool = True, jitter_strength: float = 1.0, - ): + ) -> None: + self.jitter_strength = jitter_strength self.gaussian_blur = gaussian_blur @@ -89,7 +92,7 @@ def __init__( self.transform.append(online_train_transform) - def __call__(self, sample): + def __call__(self, sample: Tensor) -> List[Tensor]: multi_crops = list(map(lambda transform: transform(sample), self.transform)) return multi_crops @@ -105,7 +108,8 @@ def __init__( max_scale_crops: Tuple[float] = (1, 0.33), gaussian_blur: bool = True, jitter_strength: float = 1.0, - ): + ) -> None: + super().__init__( normalize=normalize, size_crops=size_crops, @@ -166,5 +170,5 @@ def __init__( data_transforms.append(final_transform) self.transform = transforms.Compose(data_transforms) - def __call__(self, sample): + def __call__(self, sample: Tensor) -> Tensor: return self.transform(sample) From 7b60348faabf28ad37aed8234eb96e40603dd42b Mon Sep 17 00:00:00 2001 From: Shion Date: Fri, 6 Jan 2023 23:16:37 +0900 Subject: [PATCH 15/19] Fix MoCo docs error --- docs/source/transforms/self_supervised.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/transforms/self_supervised.rst b/docs/source/transforms/self_supervised.rst index 33e19cd83c..08f18bee2f 100644 --- a/docs/source/transforms/self_supervised.rst +++ b/docs/source/transforms/self_supervised.rst @@ -92,32 +92,32 @@ Transforms used for MOCO V2 CIFAR-10 Train (m2) ^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainCIFAR10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2TrainCIFAR10Transforms :noindex: CIFAR-10 Eval (m2) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalCIFAR10Transforms +.. 
autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2EvalCIFAR10Transforms :noindex: Imagenet Train (m2) ^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainSTL10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2TrainSTL10Transforms :noindex: Imagenet Eval (m2) ^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalSTL10Transforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2EvalSTL10Transforms :noindex: STL-10 Train (m2) ^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2TrainImagenetTransforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2TrainImagenetTransforms :noindex: STL-10 Eval (m2) ^^^^^^^^^^^^^^^^^^^ -.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.Moco2EvalImagenetTransforms +.. autoclass:: pl_bolts.transforms.self_supervised.moco_transforms.MoCo2EvalImagenetTransforms :noindex: SimCLR transforms From bd23c27f3adc7712a268c22e19edf05b78f7b117 Mon Sep 17 00:00:00 2001 From: Jirka B Date: Fri, 19 May 2023 10:17:31 -0400 Subject: [PATCH 16/19] update mergify team --- .github/mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index dc431ff4c5..314ae28dca 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -79,4 +79,4 @@ pull_request_rules: actions: request_reviews: teams: - - "@PyTorchLightning/core-bolts" + - "@Lightning-Universe/core-Bolts" From e3ab9056a73b470fbef29179cb3033d3875e8e53 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 15:44:13 +0000 Subject: [PATCH 17/19] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pl_bolts/transforms/self_supervised/moco_transforms.py | 1 - 
src/pl_bolts/transforms/self_supervised/simclr_transforms.py | 1 - src/pl_bolts/transforms/self_supervised/swav_transforms.py | 2 -- 3 files changed, 4 deletions(-) diff --git a/src/pl_bolts/transforms/self_supervised/moco_transforms.py b/src/pl_bolts/transforms/self_supervised/moco_transforms.py index 4f4f116dec..f958bad1b6 100644 --- a/src/pl_bolts/transforms/self_supervised/moco_transforms.py +++ b/src/pl_bolts/transforms/self_supervised/moco_transforms.py @@ -25,7 +25,6 @@ @under_review() class MoCoTrainTransforms: - normalization: type """MoCo training transforms. diff --git a/src/pl_bolts/transforms/self_supervised/simclr_transforms.py b/src/pl_bolts/transforms/self_supervised/simclr_transforms.py index a2ca30eecf..672e2a94a1 100644 --- a/src/pl_bolts/transforms/self_supervised/simclr_transforms.py +++ b/src/pl_bolts/transforms/self_supervised/simclr_transforms.py @@ -125,7 +125,6 @@ def __init__( jitter_strength: float = 1.0, normalize: Union[None, Callable] = None, ) -> None: - super().__init__( normalize=normalize, input_height=input_height, gaussian_blur=gaussian_blur, jitter_strength=jitter_strength ) diff --git a/src/pl_bolts/transforms/self_supervised/swav_transforms.py b/src/pl_bolts/transforms/self_supervised/swav_transforms.py index b78397717b..8f9d4509ed 100644 --- a/src/pl_bolts/transforms/self_supervised/swav_transforms.py +++ b/src/pl_bolts/transforms/self_supervised/swav_transforms.py @@ -22,7 +22,6 @@ def __init__( gaussian_blur: bool = True, jitter_strength: float = 1.0, ) -> None: - self.jitter_strength = jitter_strength self.gaussian_blur = gaussian_blur @@ -109,7 +108,6 @@ def __init__( gaussian_blur: bool = True, jitter_strength: float = 1.0, ) -> None: - super().__init__( normalize=normalize, size_crops=size_crops, From afdc09aa7761cf1dc4b960701d780f1972be75b3 Mon Sep 17 00:00:00 2001 From: Jirka B Date: Fri, 19 May 2023 10:17:31 -0400 Subject: [PATCH 18/19] update mergify team --- .github/mergify.yml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index dc431ff4c5..e53eba78f4 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -79,4 +79,4 @@ pull_request_rules: actions: request_reviews: teams: - - "@PyTorchLightning/core-bolts" + - "@Lightning-Universe/core-bolts" From 383ecfce8ce1a1dc92cdcc74e2129549126bf327 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 20 May 2023 20:57:41 +0000 Subject: [PATCH 19/19] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/pl_bolts/transforms/self_supervised/moco_transforms.py | 1 - src/pl_bolts/transforms/self_supervised/swav_transforms.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/pl_bolts/transforms/self_supervised/moco_transforms.py b/src/pl_bolts/transforms/self_supervised/moco_transforms.py index f958bad1b6..1d6531f568 100644 --- a/src/pl_bolts/transforms/self_supervised/moco_transforms.py +++ b/src/pl_bolts/transforms/self_supervised/moco_transforms.py @@ -26,7 +26,6 @@ @under_review() class MoCoTrainTransforms: normalization: type - """MoCo training transforms. Args: diff --git a/src/pl_bolts/transforms/self_supervised/swav_transforms.py b/src/pl_bolts/transforms/self_supervised/swav_transforms.py index 8f9d4509ed..e2bbd827b7 100644 --- a/src/pl_bolts/transforms/self_supervised/swav_transforms.py +++ b/src/pl_bolts/transforms/self_supervised/swav_transforms.py @@ -92,7 +92,7 @@ def __init__( self.transform.append(online_train_transform) def __call__(self, sample: Tensor) -> List[Tensor]: - multi_crops = list(map(lambda transform: transform(sample), self.transform)) + multi_crops = [transform(sample) for transform in self.transform] return multi_crops