diff --git a/src/anomalib/models/image/draem/lightning_model.py b/src/anomalib/models/image/draem/lightning_model.py
index f33bff6538..6eb0e197fc 100644
--- a/src/anomalib/models/image/draem/lightning_model.py
+++ b/src/anomalib/models/image/draem/lightning_model.py
@@ -12,6 +12,7 @@
 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT
 from torch import nn
+from torchvision.transforms.v2 import Compose, Resize, Transform
 
 from anomalib import LearningType
 from anomalib.data.utils import Augmenter
@@ -150,3 +151,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for DRAEM. Normalization is not needed as the images are scaled to [0, 1] in Dataset."""
+        image_size = image_size or (256, 256)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
diff --git a/src/anomalib/models/image/dsr/lightning_model.py b/src/anomalib/models/image/dsr/lightning_model.py
index b9a1136fd3..8381fce73d 100644
--- a/src/anomalib/models/image/dsr/lightning_model.py
+++ b/src/anomalib/models/image/dsr/lightning_model.py
@@ -13,6 +13,7 @@
 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT, OptimizerLRScheduler
 from torch import Tensor
+from torchvision.transforms.v2 import Compose, Resize, Transform
 
 from anomalib import LearningType
 from anomalib.data.utils import DownloadInfo, download_and_extract
@@ -191,3 +192,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for DSR. Normalization is not needed as the images are scaled to [0, 1] in Dataset."""
+        image_size = image_size or (256, 256)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
diff --git a/src/anomalib/models/image/rkde/lightning_model.py b/src/anomalib/models/image/rkde/lightning_model.py
index 02ad6c2564..f8b6af6d7a 100644
--- a/src/anomalib/models/image/rkde/lightning_model.py
+++ b/src/anomalib/models/image/rkde/lightning_model.py
@@ -11,6 +11,7 @@
 import torch
 from lightning.pytorch.utilities.types import STEP_OUTPUT
+from torchvision.transforms.v2 import Compose, Resize, Transform
 
 from anomalib import LearningType
 from anomalib.models.components import AnomalyModule, MemoryBankMixin
@@ -143,3 +144,13 @@ def learning_type(self) -> LearningType:
             LearningType: Learning type of the model.
         """
         return LearningType.ONE_CLASS
+
+    @staticmethod
+    def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform:
+        """Default transform for RKDE."""
+        image_size = image_size or (240, 360)
+        return Compose(
+            [
+                Resize(image_size, antialias=True),
+            ],
+        )
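
A minimal usage sketch of the new default transform (assuming the DRAEM lightning module class is exported as Draem from the module patched above, as in upstream anomalib): the static method returns a torchvision v2 Compose that only resizes, so inputs are expected to already be scaled to [0, 1].

    # Usage sketch; `Draem` is assumed to be the lightning module class in the patched module.
    import torch

    from anomalib.models.image.draem.lightning_model import Draem

    transform = Draem.configure_transforms()  # default: Resize((256, 256), antialias=True)
    image = torch.rand(3, 700, 900)           # dummy image, already in [0, 1]
    resized = transform(image)
    print(resized.shape)                      # torch.Size([3, 256, 256])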