From 5338afa620facb36a0d632027c1dd9b63b51f0cc Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Wed, 9 Oct 2024 12:14:05 +0100
Subject: [PATCH 01/59] Created pre-processor

Signed-off-by: Samet Akcay
---
 src/anomalib/pre_processing/__init__.py       |  8 ++
 src/anomalib/pre_processing/pre_processing.py | 95 +++++++++++++++++++
 2 files changed, 103 insertions(+)
 create mode 100644 src/anomalib/pre_processing/__init__.py
 create mode 100644 src/anomalib/pre_processing/pre_processing.py

diff --git a/src/anomalib/pre_processing/__init__.py b/src/anomalib/pre_processing/__init__.py
new file mode 100644
index 0000000000..d70565f882
--- /dev/null
+++ b/src/anomalib/pre_processing/__init__.py
@@ -0,0 +1,8 @@
+"""Anomalib pre-processing module."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from .pre_processing import PreProcessor
+
+__all__ = ["PreProcessor"]
diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py
new file mode 100644
index 0000000000..c79331fa84
--- /dev/null
+++ b/src/anomalib/pre_processing/pre_processing.py
@@ -0,0 +1,95 @@
+"""Anomalib pre-processing module."""
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+from lightning import Callback, LightningModule, Trainer
+from torch import nn
+from torchvision.transforms.v2 import Transform
+
+from anomalib.data.dataclasses.torch.base import Batch
+
+
+class PreProcessor(nn.Module, Callback):
+    """Anomalib pre-processor."""
+
+    def __init__(
+        self,
+        train_transforms: Transform | None = None,
+        val_transforms: Transform | None = None,
+        test_transforms: Transform | None = None,
+        transforms: Transform | None = None,
+    ) -> None:
+        super().__init__()
+
+        if transforms and any([train_transforms, val_transforms, test_transforms]):
+            msg = (
+                "`transforms` cannot be used together with `train_transforms`, `val_transforms`, `test_transforms`.\n"
+                "If you want to apply the same transform to the training, validation and test data, "
+                "use only `transforms`. \n"
+                "Otherwise, specify transforms for training, validation and test individually."
+            )
+            raise ValueError(msg)
+
+        self.train_transforms = train_transforms or transforms
+        self.val_transforms = val_transforms or transforms
+        self.test_transforms = test_transforms or transforms
+
+    def on_train_batch_start(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+        batch: Batch,
+        batch_idx: int,
+    ) -> None:
+        """Apply transforms to the training batch."""
+        del trainer, pl_module, batch_idx  # Unused parameters
+
+        if self.train_transforms:
+            image, gt_mask = self.train_transforms(batch.image, batch.gt_mask)
+            batch.update(image=image, gt_mask=gt_mask)
+
+    def on_validation_batch_start(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+        batch: Batch,
+        batch_idx: int,
+        dataloader_idx: int = 0,
+    ) -> None:
+        """Apply transforms to the validation batch."""
+        del trainer, pl_module, batch_idx, dataloader_idx  # Unused parameters
+
+        if self.val_transforms:
+            image, gt_mask = self.val_transforms(batch.image, batch.gt_mask)
+            batch.update(image=image, gt_mask=gt_mask)
+
+    def on_test_batch_start(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+        batch: Batch,
+        batch_idx: int,
+        dataloader_idx: int = 0,
+    ) -> None:
+        """Apply transforms to the test batch."""
+        del trainer, pl_module, batch_idx, dataloader_idx  # Unused parameters
+
+        if self.test_transforms:
+            image, gt_mask = self.test_transforms(batch.image, batch.gt_mask)
+            batch.update(image=image, gt_mask=gt_mask)
+
+    def on_predict_batch_start(
+        self,
+        trainer: Trainer,
+        pl_module: LightningModule,
+        batch: Batch,
+        batch_idx: int,
+        dataloader_idx: int = 0,
+    ) -> None:
+        """Apply transforms to the predict batch."""
+        del trainer, pl_module, batch_idx, dataloader_idx  # Unused parameters
+
+        if self.test_transforms:
+            image, gt_mask = self.test_transforms(batch.image, batch.gt_mask)
+            batch.update(image=image, gt_mask=gt_mask)

From 180c22f205445f35640d1df7f8fc39a5d86bc935 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Wed, 9 Oct 2024 12:34:21 +0100
Subject: [PATCH 02/59] Rename transforms to transform in pre-processor

Signed-off-by: Samet Akcay
---
 src/anomalib/pre_processing/pre_processing.py | 34 +++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py
index c79331fa84..d27c3f4961 100644
--- a/src/anomalib/pre_processing/pre_processing.py
+++ b/src/anomalib/pre_processing/pre_processing.py
@@ -15,25 +15,25 @@ class PreProcessor(nn.Module, Callback):
 
     def __init__(
         self,
-        train_transforms: Transform | None = None,
-        val_transforms: Transform | None = None,
-        test_transforms: Transform | None = None,
-        transforms: Transform | None = None,
+        train_transform: Transform | None = None,
+        val_transform: Transform | None = None,
+        test_transform: Transform | None = None,
+        transform: Transform | None = None,
     ) -> None:
         super().__init__()
 
-        if transforms and any([train_transforms, val_transforms, test_transforms]):
+        if transform and any([train_transform, val_transform, test_transform]):
             msg = (
-                "`transforms` cannot be used together with `train_transforms`, `val_transforms`, `test_transforms`.\n"
+                "`transforms` cannot be used together with `train_transform`, `val_transform`, `test_transform`.\n"
                 "If you want to apply the same transform to the training, validation and test data, "
                 "use only `transforms`. \n"
                 "Otherwise, specify transforms for training, validation and test individually."
) raise ValueError(msg) - self.train_transforms = train_transforms or transforms - self.val_transforms = val_transforms or transforms - self.test_transforms = test_transforms or transforms + self.train_transform = train_transform or transform + self.val_transform = val_transform or transform + self.test_transform = test_transform or transform def on_train_batch_start( self, @@ -45,8 +45,8 @@ def on_train_batch_start( """Apply transforms to the training batch.""" del trainer, pl_module, batch_idx # Unused parameters - if self.train_transforms: - image, gt_mask = self.train_transforms(batch.image, batch.gt_mask) + if self.train_transform: + image, gt_mask = self.train_transform(batch.image, batch.gt_mask) batch.update(image=image, gt_mask=gt_mask) def on_validation_batch_start( @@ -60,8 +60,8 @@ def on_validation_batch_start( """Apply transforms to the validation batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.val_transforms: - image, gt_mask = self.val_transforms(batch.image, batch.gt_mask) + if self.val_transform: + image, gt_mask = self.val_transform(batch.image, batch.gt_mask) batch.update(image=image, gt_mask=gt_mask) def on_test_batch_start( @@ -75,8 +75,8 @@ def on_test_batch_start( """Apply transforms to the test batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.test_transforms: - image, gt_mask = self.test_transforms(batch.image, batch.gt_mask) + if self.test_transform: + image, gt_mask = self.test_transform(batch.image, batch.gt_mask) batch.update(image=image, gt_mask=gt_mask) def on_predict_batch_start( @@ -90,6 +90,6 @@ def on_predict_batch_start( """Apply transforms to the predict batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.test_transforms: - image, gt_mask = self.test_transforms(batch.image, batch.gt_mask) + if self.test_transform: + image, gt_mask = self.test_transform(batch.image, batch.gt_mask) batch.update(image=image, gt_mask=gt_mask) From 7738e386d2ccc64653250274448cc77472f2f9dd Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 9 Oct 2024 12:40:44 +0100 Subject: [PATCH 03/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 63 ------------------- .../data/datamodules/depth/folder_3d.py | 20 ------ .../data/datamodules/depth/mvtec_3d.py | 20 +----- src/anomalib/data/datamodules/image/btech.py | 22 ------- src/anomalib/data/datamodules/image/folder.py | 22 ------- .../data/datamodules/image/kolektor.py | 20 ------ src/anomalib/data/datamodules/image/mvtec.py | 24 +------ src/anomalib/data/datamodules/image/visa.py | 19 ------ src/anomalib/data/datamodules/video/avenue.py | 19 ------ .../data/datamodules/video/shanghaitech.py | 20 ------ .../data/datamodules/video/ucsd_ped.py | 22 +------ src/anomalib/data/datasets/base/image.py | 2 +- 12 files changed, 7 insertions(+), 266 deletions(-) diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index 28fd9499eb..8476bf5eeb 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -12,7 +12,6 @@ from lightning.pytorch.trainer.states import TrainerFn from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from torch.utils.data.dataloader import DataLoader -from torchvision.transforms.v2 import Resize, Transform from anomalib.data.utils import TestSplitMode, ValSplitMode, random_split, split_by_label from 
anomalib.data.utils.synthetic import SyntheticAnomalyDataset @@ -40,14 +39,6 @@ class AnomalibDataModule(LightningDataModule, ABC): Defaults to ``None``. test_split_ratio (float): Fraction of the train images held out for testing. Defaults to ``None``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. seed (int | None, optional): Seed used during random subset splitting. Defaults to ``None``. """ @@ -61,10 +52,6 @@ def __init__( val_split_ratio: float, test_split_mode: TestSplitMode | str | None = None, test_split_ratio: float | None = None, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, seed: int | None = None, ) -> None: super().__init__() @@ -75,18 +62,8 @@ def __init__( self.test_split_ratio = test_split_ratio self.val_split_mode = ValSplitMode(val_split_mode) self.val_split_ratio = val_split_ratio - self.image_size = image_size self.seed = seed - # set transforms - if bool(train_transform) != bool(eval_transform): - msg = "Only one of train_transform and eval_transform was specified. This is not recommended because \ - it could lead to unexpected behaviour. Please ensure training and eval transforms have the same \ - reshape and normalization characteristics." - logger.warning(msg) - self._train_transform = train_transform or transform - self._eval_transform = eval_transform or transform - self.train_data: AnomalibDataset self.val_data: AnomalibDataset self.test_data: AnomalibDataset @@ -228,46 +205,6 @@ def predict_dataloader(self) -> EVAL_DATALOADERS: """Use the test dataloader for inference unless overridden.""" return self.test_dataloader() - @property - def transform(self) -> Transform: - """Property that returns the user-specified transform for the datamodule, if any. - - This property is accessed by the engine to set the transform for the model. The eval_transform takes precedence - over the train_transform, because the transform that we store in the model is the one that should be used during - inference. - """ - if self._eval_transform: - return self._eval_transform - return None - - @property - def train_transform(self) -> Transform: - """Get the transforms that will be passed to the train dataset. - - If the train_transform is not set, the engine will request the transform from the model. - """ - if self._train_transform: - return self._train_transform - if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: - return self.trainer.lightning_module.transform - if self.image_size: - return Resize(self.image_size, antialias=True) - return None - - @property - def eval_transform(self) -> Transform: - """Get the transform that will be passed to the val/test/predict datasets. - - If the eval_transform is not set, the engine will request the transform from the model. 
- """ - if self._eval_transform: - return self._eval_transform - if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: - return self.trainer.lightning_module.transform - if self.image_size: - return Resize(self.image_size, antialias=True) - return None - @classmethod def from_config( cls: type["AnomalibDataModule"], diff --git a/src/anomalib/data/datamodules/depth/folder_3d.py b/src/anomalib/data/datamodules/depth/folder_3d.py index cebea42d02..2e2930be26 100644 --- a/src/anomalib/data/datamodules/depth/folder_3d.py +++ b/src/anomalib/data/datamodules/depth/folder_3d.py @@ -8,8 +8,6 @@ from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.folder_3d import Folder3DDataset @@ -51,14 +49,6 @@ class Folder3D(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -87,10 +77,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -101,10 +87,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -127,7 +109,6 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = Folder3DDataset( name=self.name, task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -143,7 +124,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = Folder3DDataset( name=self.name, task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/depth/mvtec_3d.py b/src/anomalib/data/datamodules/depth/mvtec_3d.py index 1e5b90e917..b833643419 100644 --- a/src/anomalib/data/datamodules/depth/mvtec_3d.py +++ b/src/anomalib/data/datamodules/depth/mvtec_3d.py @@ -22,8 +22,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.mvtec_3d import MVTec3DDataset @@ -62,13 +60,9 @@ class MVTec3D(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. + test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. 
@@ -90,10 +84,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -104,10 +94,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -122,14 +108,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = MVTec3DDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTec3DDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/btech.py b/src/anomalib/data/datamodules/image/btech.py index 5abda6156e..11bbcf387f 100644 --- a/src/anomalib/data/datamodules/image/btech.py +++ b/src/anomalib/data/datamodules/image/btech.py @@ -14,7 +14,6 @@ from pathlib import Path import cv2 -from torchvision.transforms.v2 import Transform from tqdm import tqdm from anomalib import TaskType @@ -53,14 +52,6 @@ class BTech(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode, optional): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float, optional): Fraction of images from the train set that will be reserved for testing. @@ -79,12 +70,9 @@ class BTech(AnomalibDataModule): >>> datamodule = BTech( ... root="./datasets/BTech", ... category="01", - ... image_size=256, ... train_batch_size=32, ... eval_batch_size=32, ... num_workers=8, - ... transform_config_train=None, - ... transform_config_eval=None, ... 
) >>> datamodule.setup() @@ -121,10 +109,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -135,10 +119,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -153,14 +133,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = BTechDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = BTechDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/folder.py b/src/anomalib/data/datamodules/image/folder.py index 7941ba2f7b..7fe51c32a0 100644 --- a/src/anomalib/data/datamodules/image/folder.py +++ b/src/anomalib/data/datamodules/image/folder.py @@ -9,8 +9,6 @@ from collections.abc import Sequence from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.folder import FolderDataset @@ -47,14 +45,6 @@ class Folder(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. Defaults to ``segmentation``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -102,8 +92,6 @@ class Folder(AnomalibDataModule): abnormal_dir="crack", task=TaskType.SEGMENTATION, mask_dir=dataset_root / "mask" / "crack", - image_size=256, - normalization=InputNormalizationMethod.NONE, ) folder_datamodule.setup() @@ -136,10 +124,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -164,10 +148,6 @@ def __init__( test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, seed=seed, ) @@ -186,7 +166,6 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = FolderDataset( name=self.name, task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -199,7 +178,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = FolderDataset( name=self.name, task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/image/kolektor.py b/src/anomalib/data/datamodules/image/kolektor.py index 2f8dc3b92b..fa766b7535 100644 --- a/src/anomalib/data/datamodules/image/kolektor.py +++ b/src/anomalib/data/datamodules/image/kolektor.py @@ -20,8 +20,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.kolektor import KolektorDataset @@ -56,14 +54,6 @@ class Kolektor(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR`` test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -83,10 +73,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -97,10 +83,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -114,13 +96,11 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = KolektorDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, ) self.test_data = KolektorDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, ) diff --git a/src/anomalib/data/datamodules/image/mvtec.py b/src/anomalib/data/datamodules/image/mvtec.py index 508a582380..da23a1644a 100644 --- a/src/anomalib/data/datamodules/image/mvtec.py +++ b/src/anomalib/data/datamodules/image/mvtec.py @@ -28,8 +28,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.mvtec import MVTecDataset @@ -68,14 +66,6 @@ class MVTec(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -103,9 +93,9 @@ class MVTec(AnomalibDataModule): >>> datamodule = MVTec(category="cable") - To change the image and batch size: + To change the batch size: - >>> datamodule = MVTec(image_size=(512, 512), train_batch_size=16, eval_batch_size=8) + >>> datamodule = MVTec(train_batch_size=16, eval_batch_size=8) MVTec AD dataset does not provide a validation set. 
If you would like to use a separate validation set, you can use the ``val_split_mode`` and @@ -129,10 +119,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -142,10 +128,6 @@ def __init__( super().__init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, num_workers=num_workers, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, @@ -172,14 +154,12 @@ def _setup(self, _stage: str | None = None) -> None: """ self.train_data = MVTecDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTecDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/visa.py b/src/anomalib/data/datamodules/image/visa.py index 30bf945c73..6c30e58956 100644 --- a/src/anomalib/data/datamodules/image/visa.py +++ b/src/anomalib/data/datamodules/image/visa.py @@ -28,7 +28,6 @@ from pathlib import Path import cv2 -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule @@ -66,14 +65,6 @@ class Visa(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -94,10 +85,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -108,10 +95,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -127,14 +110,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = VisaDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.split_root, category=self.category, ) self.test_data = VisaDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.split_root, category=self.category, diff --git a/src/anomalib/data/datamodules/video/avenue.py b/src/anomalib/data/datamodules/video/avenue.py index 8914475081..67a0614ca1 100644 --- a/src/anomalib/data/datamodules/video/avenue.py +++ b/src/anomalib/data/datamodules/video/avenue.py @@ -21,7 +21,6 @@ import cv2 import scipy.io -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule @@ -64,14 +63,6 @@ class Avenue(AnomalibVideoDataModule): Defaults to ``VideoTargetFrame.LAST``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to ``32``. eval_batch_size (int, optional): Test batch size. 
@@ -141,10 +132,6 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame | str = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -156,10 +143,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -175,7 +158,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = AvenueDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -186,7 +168,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = AvenueDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datamodules/video/shanghaitech.py b/src/anomalib/data/datamodules/video/shanghaitech.py index b474f09547..a50661496e 100644 --- a/src/anomalib/data/datamodules/video/shanghaitech.py +++ b/src/anomalib/data/datamodules/video/shanghaitech.py @@ -20,8 +20,6 @@ from pathlib import Path from shutil import move -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame @@ -53,14 +51,6 @@ class ShanghaiTech(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task TaskType): Task type, 'classification', 'detection' or 'segmentation' - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to 32. eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. 
@@ -77,10 +67,6 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -92,10 +78,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -112,7 +94,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = ShanghaiTechDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -123,7 +104,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = ShanghaiTechDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datamodules/video/ucsd_ped.py b/src/anomalib/data/datamodules/video/ucsd_ped.py index 2dd480ef37..410efd7728 100644 --- a/src/anomalib/data/datamodules/video/ucsd_ped.py +++ b/src/anomalib/data/datamodules/video/ucsd_ped.py @@ -7,8 +7,6 @@ from pathlib import Path from shutil import move -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame @@ -34,16 +32,10 @@ class UCSDped(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task (TaskType): Task type, 'classification', 'detection' or 'segmentation' - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. + train_batch_size (int, optional): Training batch size. Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + eval_batch_size (int, optional): Test batch size. Defaults to ``None``. - train_batch_size (int, optional): Training batch size. Defaults to 32. - eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. val_split_mode (ValSplitMode): Setting that determines how the validation subset is obtained. val_split_ratio (float): Fraction of train or test images that will be reserved for validation. 
@@ -58,10 +50,6 @@ def __init__( frames_between_clips: int = 10, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 8, eval_batch_size: int = 8, num_workers: int = 8, @@ -73,10 +61,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -93,7 +77,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = UCSDpedDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -104,7 +87,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = UCSDpedDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datasets/base/image.py b/src/anomalib/data/datasets/base/image.py index 5aaabc8fe4..96f6d3a929 100644 --- a/src/anomalib/data/datasets/base/image.py +++ b/src/anomalib/data/datasets/base/image.py @@ -179,7 +179,7 @@ def __getitem__(self, index: int) -> DatasetItem: if label_index == LabelName.NORMAL else read_mask(mask_path, as_tensor=True) ) - item["image"], item["gt_mask"] = self.transform(image, mask) if self.transform else (image, mask) + item["image"], item["gt_mask"] = image, mask else: msg = f"Unknown task type: {self.task}" From a1330488520aacfb14e5cdcac1739e58ef3a8e36 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 9 Oct 2024 12:51:42 +0100 Subject: [PATCH 04/59] Remove transforms from datasets Signed-off-by: Samet Akcay --- src/anomalib/data/datasets/base/depth.py | 18 ++++-------------- src/anomalib/data/datasets/base/image.py | 8 ++------ src/anomalib/data/datasets/base/video.py | 13 +------------ src/anomalib/data/datasets/depth/folder_3d.py | 7 +------ src/anomalib/data/datasets/depth/mvtec_3d.py | 6 +----- src/anomalib/data/datasets/image/btech.py | 9 +-------- src/anomalib/data/datasets/image/folder.py | 14 ++------------ src/anomalib/data/datasets/image/kolektor.py | 6 +----- src/anomalib/data/datasets/image/mvtec.py | 9 +-------- src/anomalib/data/datasets/image/visa.py | 13 +------------ src/anomalib/data/datasets/video/avenue.py | 9 --------- .../data/datasets/video/shanghaitech.py | 5 ----- src/anomalib/data/datasets/video/ucsd_ped.py | 5 ----- src/anomalib/data/utils/synthetic.py | 10 ++++------ 14 files changed, 19 insertions(+), 113 deletions(-) diff --git a/src/anomalib/data/datasets/base/depth.py b/src/anomalib/data/datasets/base/depth.py index 56460b3a6a..cc913f3eaa 100644 --- a/src/anomalib/data/datasets/base/depth.py +++ b/src/anomalib/data/datasets/base/depth.py @@ -9,7 +9,6 @@ import torch from PIL import Image from torchvision.transforms.functional import to_tensor -from torchvision.transforms.v2 import Transform from torchvision.tv_tensors import Mask from anomalib import TaskType @@ -24,14 +23,10 @@ class AnomalibDepthDataset(AnomalibDataset, ABC): Args: task (str): Task type, either 'classification' or 'segmentation' - transform 
(Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. """ - def __init__(self, task: TaskType, transform: Transform | None = None) -> None: - super().__init__(task, transform) - - self.transform = transform + def __init__(self, task: TaskType) -> None: + super().__init__(task) def __getitem__(self, index: int) -> DepthItem: """Return rgb image, depth image and mask. @@ -52,9 +47,7 @@ def __getitem__(self, index: int) -> DepthItem: item = {"image_path": image_path, "depth_path": depth_path, "label": label_index} if self.task == TaskType.CLASSIFICATION: - item["image"], item["depth_image"] = ( - self.transform(image, depth_image) if self.transform else (image, depth_image) - ) + item["image"], item["depth_image"] = image, depth_image elif self.task == TaskType.SEGMENTATION: # Only Anomalous (1) images have masks in anomaly datasets # Therefore, create empty mask for Normal (0) images. @@ -63,11 +56,8 @@ def __getitem__(self, index: int) -> DepthItem: if label_index == LabelName.NORMAL else Mask(to_tensor(Image.open(mask_path)).squeeze()) ) - item["image"], item["depth_image"], item["mask"] = ( - self.transform(image, depth_image, mask) if self.transform else (image, depth_image, mask) - ) + item["image"], item["depth_image"], item["mask"] = image, depth_image, mask item["mask_path"] = mask_path - else: msg = f"Unknown task type: {self.task}" raise ValueError(msg) diff --git a/src/anomalib/data/datasets/base/image.py b/src/anomalib/data/datasets/base/image.py index 96f6d3a929..ade50c00ff 100644 --- a/src/anomalib/data/datasets/base/image.py +++ b/src/anomalib/data/datasets/base/image.py @@ -13,7 +13,6 @@ import torch from pandas import DataFrame from torch.utils.data import Dataset -from torchvision.transforms.v2 import Transform from torchvision.tv_tensors import Mask from anomalib import TaskType @@ -58,14 +57,11 @@ class AnomalibDataset(Dataset, ABC): Args: task (str): Task type, either 'classification' or 'segmentation' - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. """ - def __init__(self, task: TaskType | str, transform: Transform | None = None) -> None: + def __init__(self, task: TaskType | str) -> None: super().__init__() self.task = TaskType(task) - self.transform = transform self._samples: DataFrame | None = None self._category: str | None = None @@ -170,7 +166,7 @@ def __getitem__(self, index: int) -> DatasetItem: item = {"image_path": image_path, "gt_label": label_index} if self.task == TaskType.CLASSIFICATION: - item["image"] = self.transform(image) if self.transform else image + item["image"] = image elif self.task == TaskType.SEGMENTATION: # Only Anomalous (1) images have masks in anomaly datasets # Therefore, create empty mask for Normal (0) images. 
diff --git a/src/anomalib/data/datasets/base/video.py b/src/anomalib/data/datasets/base/video.py index 3ba8f2fd83..5ab1193d7c 100644 --- a/src/anomalib/data/datasets/base/video.py +++ b/src/anomalib/data/datasets/base/video.py @@ -9,9 +9,7 @@ import torch from pandas import DataFrame -from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.functional import to_dtype, to_dtype_video -from torchvision.tv_tensors import Mask from anomalib import TaskType from anomalib.data.dataclasses import VideoBatch, VideoItem @@ -39,8 +37,6 @@ class AnomalibVideoDataset(AnomalibDataset, ABC): task (str): Task type, either 'classification' or 'segmentation' clip_length_in_frames (int): Number of video frames in each clip. frames_between_clips (int): Number of frames between each consecutive video clip. - transform (Transform, optional): Transforms that should be applied to the input clips. - Defaults to ``None``. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. Defaults to ``VideoTargetFrame.LAST``. """ @@ -50,14 +46,12 @@ def __init__( task: TaskType, clip_length_in_frames: int, frames_between_clips: int, - transform: Transform | None = None, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, ) -> None: - super().__init__(task, transform) + super().__init__(task) self.clip_length_in_frames = clip_length_in_frames self.frames_between_clips = frames_between_clips - self.transform = transform self.indexer: ClipsIndexer | None = None self.indexer_cls: Callable | None = None @@ -153,13 +147,8 @@ def __getitem__(self, index: int) -> VideoItem: # include the untransformed image for visualization item.original_image = to_dtype(item.image, torch.uint8, scale=True) - # apply transforms if item.gt_mask is not None: - if self.transform: - item.image, item.gt_mask = self.transform(item.image, Mask(item.gt_mask)) item.gt_label = torch.Tensor([1 in frame for frame in item.gt_mask]).int().squeeze(0) - elif self.transform: - item.image = self.transform(item.image) # squeeze temporal dimensions in case clip length is 1 item.image = item.image.squeeze(0) diff --git a/src/anomalib/data/datasets/depth/folder_3d.py b/src/anomalib/data/datasets/depth/folder_3d.py index 9ec78487b3..752727e1c6 100644 --- a/src/anomalib/data/datasets/depth/folder_3d.py +++ b/src/anomalib/data/datasets/depth/folder_3d.py @@ -9,7 +9,6 @@ from pathlib import Path from pandas import DataFrame, isna -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.depth import AnomalibDepthDataset @@ -24,7 +23,6 @@ class Folder3DDataset(AnomalibDepthDataset): Args: name (str): Name of the dataset. task (TaskType): Task type. (``classification``, ``detection`` or ``segmentation``). - transform (Transform): Transforms that should be applied to the input images. normal_dir (str | Path): Path to the directory containing normal images. root (str | Path | None): Root folder of the dataset. Defaults to ``None``. @@ -45,8 +43,6 @@ class Folder3DDataset(AnomalibDepthDataset): normal_test_depth_dir (str | Path | None, optional): Path to the directory containing normal depth images for the test dataset. Normal test images will be a split of `normal_dir` if `None`. Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split (str | Split | None): Fixed subset split that follows from folder structure on file system. 
Choose from [Split.FULL, Split.TRAIN, Split.TEST] Defaults to ``None``. @@ -70,11 +66,10 @@ def __init__( normal_depth_dir: str | Path | None = None, abnormal_depth_dir: str | Path | None = None, normal_test_depth_dir: str | Path | None = None, - transform: Transform | None = None, split: str | Split | None = None, extensions: tuple[str, ...] | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task) self._name = name self.split = split diff --git a/src/anomalib/data/datasets/depth/mvtec_3d.py b/src/anomalib/data/datasets/depth/mvtec_3d.py index de6d326a4a..82ef991a96 100644 --- a/src/anomalib/data/datasets/depth/mvtec_3d.py +++ b/src/anomalib/data/datasets/depth/mvtec_3d.py @@ -23,7 +23,6 @@ from pathlib import Path from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.depth import AnomalibDepthDataset @@ -43,8 +42,6 @@ class MVTec3DDataset(AnomalibDepthDataset): Defaults to ``"./datasets/MVTec3D"``. category (str): Sub-category of the dataset, e.g. 'bagel' Defaults to ``"bagel"``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. """ @@ -54,10 +51,9 @@ def __init__( task: TaskType, root: Path | str = "./datasets/MVTec3D", category: str = "bagel", - transform: Transform | None = None, split: str | Split | None = None, ) -> None: - super().__init__(task=task, transform=transform) + super().__init__(task) self.root_category = Path(root) / Path(category) self.split = split diff --git a/src/anomalib/data/datasets/image/btech.py b/src/anomalib/data/datasets/image/btech.py index 412097c912..f51c5a4669 100644 --- a/src/anomalib/data/datasets/image/btech.py +++ b/src/anomalib/data/datasets/image/btech.py @@ -13,7 +13,6 @@ import pandas as pd from pandas.core.frame import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -28,19 +27,14 @@ class BTechDataset(AnomalibDataset): Args: root: Path to the BTech dataset category: Name of the BTech category. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split: 'train', 'val' or 'test' task: ``classification``, ``detection`` or ``segmentation`` create_validation_set: Create a validation subset in addition to the train and test subsets Examples: >>> from anomalib.data.image.btech import BTechDataset - >>> from anomalib.data.utils.transforms import get_transforms - >>> transform = get_transforms(image_size=256) >>> dataset = BTechDataset( ... task="classification", - ... transform=transform, ... root='./datasets/BTech', ... category='01', ... 
) @@ -69,11 +63,10 @@ def __init__( self, root: str | Path, category: str, - transform: Transform | None = None, split: str | Split | None = None, task: TaskType | str = TaskType.SEGMENTATION, ) -> None: - super().__init__(task, transform) + super().__init__(task) self.root_category = Path(root) / category self.split = split diff --git a/src/anomalib/data/datasets/image/folder.py b/src/anomalib/data/datasets/image/folder.py index 48415c0867..01e1986414 100644 --- a/src/anomalib/data/datasets/image/folder.py +++ b/src/anomalib/data/datasets/image/folder.py @@ -10,7 +10,6 @@ from pathlib import Path from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -27,8 +26,6 @@ class FolderDataset(AnomalibDataset): Args: name (str): Name of the dataset. This is used to name the datamodule, especially when logging/saving. task (TaskType): Task type. (``classification``, ``detection`` or ``segmentation``). - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. normal_dir (str | Path | Sequence): Path to the directory containing normal images. root (str | Path | None): Root folder of the dataset. Defaults to ``None``. @@ -52,12 +49,7 @@ class FolderDataset(AnomalibDataset): Examples: Assume that we would like to use this ``FolderDataset`` to create a dataset from a folder for a classification - task. We could first create the transforms, - - >>> from anomalib.data.utils import InputNormalizationMethod, get_transforms - >>> transform = get_transforms(image_size=256, normalization=InputNormalizationMethod.NONE) - - We could then create the dataset as follows, + task. .. code-block:: python @@ -65,7 +57,6 @@ class FolderDataset(AnomalibDataset): normal_dir=dataset_root / "good", abnormal_dir=dataset_root / "crack", split="train", - transform=transform, task=TaskType.CLASSIFICATION, ) @@ -76,7 +67,6 @@ def __init__( name: str, task: TaskType, normal_dir: str | Path | Sequence[str | Path], - transform: Transform | None = None, root: str | Path | None = None, abnormal_dir: str | Path | Sequence[str | Path] | None = None, normal_test_dir: str | Path | Sequence[str | Path] | None = None, @@ -84,7 +74,7 @@ def __init__( split: str | Split | None = None, extensions: tuple[str, ...] | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task) self._name = name self.split = split diff --git a/src/anomalib/data/datasets/image/kolektor.py b/src/anomalib/data/datasets/image/kolektor.py index 39e9380a03..63a9feed36 100644 --- a/src/anomalib/data/datasets/image/kolektor.py +++ b/src/anomalib/data/datasets/image/kolektor.py @@ -23,7 +23,6 @@ from cv2 import imread from pandas import DataFrame from sklearn.model_selection import train_test_split -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets import AnomalibDataset @@ -38,8 +37,6 @@ class KolektorDataset(AnomalibDataset): task (TaskType): Task type, ``classification``, ``detection`` or ``segmentation`` root (Path | str): Path to the root of the dataset Defaults to ``./datasets/kolektor``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. 
""" @@ -48,10 +45,9 @@ def __init__( self, task: TaskType, root: Path | str = "./datasets/kolektor", - transform: Transform | None = None, split: str | Split | None = None, ) -> None: - super().__init__(task=task, transform=transform) + super().__init__(task) self.root = root self.split = split diff --git a/src/anomalib/data/datasets/image/mvtec.py b/src/anomalib/data/datasets/image/mvtec.py index bb6fdf9e41..2e7e0c2af7 100644 --- a/src/anomalib/data/datasets/image/mvtec.py +++ b/src/anomalib/data/datasets/image/mvtec.py @@ -29,7 +29,6 @@ from pathlib import Path from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base import AnomalibDataset @@ -65,8 +64,6 @@ class MVTecDataset(AnomalibDataset): Defaults to ``./datasets/MVTec``. category (str): Sub-category of the dataset, e.g. 'bottle' Defaults to ``bottle``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. @@ -74,12 +71,9 @@ class MVTecDataset(AnomalibDataset): .. code-block:: python from anomalib.data.image.mvtec import MVTecDataset - from anomalib.data.utils.transforms import get_transforms - transform = get_transforms(image_size=256) dataset = MVTecDataset( task="classification", - transform=transform, root='./datasets/MVTec', category='zipper', ) @@ -110,10 +104,9 @@ def __init__( task: TaskType, root: Path | str = "./datasets/MVTec", category: str = "bottle", - transform: Transform | None = None, split: str | Split | None = None, ) -> None: - super().__init__(task=task, transform=transform) + super().__init__(task) self.root_category = Path(root) / Path(category) self.category = category diff --git a/src/anomalib/data/datasets/image/visa.py b/src/anomalib/data/datasets/image/visa.py index 9c5336ab05..f74f2a9535 100644 --- a/src/anomalib/data/datasets/image/visa.py +++ b/src/anomalib/data/datasets/image/visa.py @@ -21,8 +21,6 @@ from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datasets import AnomalibDataset from anomalib.data.datasets.image.mvtec import make_mvtec_dataset @@ -52,8 +50,6 @@ class VisaDataset(AnomalibDataset): task (TaskType): Task type, ``classification``, ``detection`` or ``segmentation`` root (str | Path): Path to the root of the dataset category (str): Sub-category of the dataset, e.g. 'candle' - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. @@ -63,12 +59,9 @@ class VisaDataset(AnomalibDataset): .. code-block:: python from anomalib.data.image.visa import VisaDataset - from anomalib.data.utils.transforms import get_transforms - transform = get_transforms(image_size=256) dataset = VisaDataset( task="classification", - transform=transform, split="train", root="./datasets/visa/visa_pytorch/", category="candle", @@ -86,12 +79,9 @@ class VisaDataset(AnomalibDataset): .. 
code-block:: python from anomalib.data.image.visa import VisaDataset - from anomalib.data.utils.transforms import get_transforms - transform = get_transforms(image_size=256) dataset = VisaDataset( task="segmentation", - transform=transform, split="train", root="./datasets/visa/visa_pytorch/", category="candle", @@ -109,10 +99,9 @@ def __init__( task: TaskType, root: str | Path, category: str, - transform: Transform | None = None, split: str | Split | None = None, ) -> None: - super().__init__(task=task, transform=transform) + super().__init__(task) self.root_category = Path(root) / category self.split = split diff --git a/src/anomalib/data/datasets/video/avenue.py b/src/anomalib/data/datasets/video/avenue.py index 0d3bd741bf..21c9f49a22 100644 --- a/src/anomalib/data/datasets/video/avenue.py +++ b/src/anomalib/data/datasets/video/avenue.py @@ -20,7 +20,6 @@ import scipy import torch from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -47,18 +46,14 @@ class AvenueDataset(AnomalibVideoDataset): Defaults to ``1``. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. Defaults to ``VideoTargetFrame.LAST``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. Examples: To create an Avenue dataset to train a classification model: .. code-block:: python - transform = A.Compose([A.Resize(256, 256), A.pytorch.ToTensorV2()]) dataset = AvenueDataset( task="classification", - transform=transform, split="train", root="./datasets/avenue/", ) @@ -74,7 +69,6 @@ class AvenueDataset(AnomalibVideoDataset): dataset = AvenueDataset( task="segmentation", - transform=transform, split="test", root="./datasets/avenue/", ) @@ -92,7 +86,6 @@ class AvenueDataset(AnomalibVideoDataset): dataset = AvenueDataset( task="classification", - transform=transform, split="test", root="./datasets/avenue/", clip_length_in_frames=1, @@ -114,7 +107,6 @@ def __init__( gt_dir: Path | str = "./datasets/avenue/ground_truth_demo", clip_length_in_frames: int = 2, frames_between_clips: int = 1, - transform: Transform | None = None, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, ) -> None: super().__init__( @@ -122,7 +114,6 @@ def __init__( clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, - transform=transform, ) self.root = root if isinstance(root, Path) else Path(root) diff --git a/src/anomalib/data/datasets/video/shanghaitech.py b/src/anomalib/data/datasets/video/shanghaitech.py index e90dbae482..12e8dd985b 100644 --- a/src/anomalib/data/datasets/video/shanghaitech.py +++ b/src/anomalib/data/datasets/video/shanghaitech.py @@ -23,7 +23,6 @@ import pandas as pd import torch from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -42,8 +41,6 @@ class ShanghaiTechDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. - transform (Transform, optional): Transforms that should be applied to the input images. 
- Defaults to ``None``. """ def __init__( @@ -55,14 +52,12 @@ def __init__( clip_length_in_frames: int = 2, frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, - transform: Transform | None = None, ) -> None: super().__init__( task=task, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, - transform=transform, ) self.root = Path(root) diff --git a/src/anomalib/data/datasets/video/ucsd_ped.py b/src/anomalib/data/datasets/video/ucsd_ped.py index 960218e79e..feeda8ff7f 100644 --- a/src/anomalib/data/datasets/video/ucsd_ped.py +++ b/src/anomalib/data/datasets/video/ucsd_ped.py @@ -9,7 +9,6 @@ import numpy as np import torch from pandas import DataFrame -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -33,8 +32,6 @@ class UCSDpedDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. """ def __init__( @@ -46,14 +43,12 @@ def __init__( clip_length_in_frames: int = 2, frames_between_clips: int = 10, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, - transform: Transform | None = None, ) -> None: super().__init__( task=task, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, - transform=transform, ) self.root_category = Path(root) / category diff --git a/src/anomalib/data/utils/synthetic.py b/src/anomalib/data/utils/synthetic.py index 16aa20d83d..c9626f81c1 100644 --- a/src/anomalib/data/utils/synthetic.py +++ b/src/anomalib/data/utils/synthetic.py @@ -16,7 +16,6 @@ import cv2 import pandas as pd from pandas import DataFrame, Series -from torchvision.transforms.v2 import Compose from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -80,7 +79,7 @@ def augment(sample: Series) -> Series: Returns: Series: DataFrame row with updated information about the augmented image. """ - # read and transform image + # read image image = read_image(sample.image_path, as_tensor=True) # apply anomalous perturbation aug_im, mask = augmenter.augment_batch(image.unsqueeze(0)) @@ -114,12 +113,11 @@ class SyntheticAnomalyDataset(AnomalibDataset): Args: task (str): Task type, either "classification" or "segmentation". - transform (A.Compose): Transform object describing the transforms that are applied to the inputs. source_samples (DataFrame): Normal samples to which the anomalous augmentations will be applied. """ - def __init__(self, task: TaskType, transform: Compose, source_samples: DataFrame) -> None: - super().__init__(task, transform) + def __init__(self, task: TaskType, source_samples: DataFrame) -> None: + super().__init__(task) self.source_samples = source_samples @@ -146,7 +144,7 @@ def from_dataset(cls: type["SyntheticAnomalyDataset"], dataset: AnomalibDataset) dataset (AnomalibDataset): Dataset consisting of only normal images that will be converrted to a synthetic anomalous dataset with a 50/50 normal anomalous split. 
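+
+        Example:
+            A minimal sketch, assuming ``dataset`` is an ``AnomalibDataset`` that
+            contains only normal images:
+
+            >>> synthetic_dataset = SyntheticAnomalyDataset.from_dataset(dataset)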
""" - return cls(task=dataset.task, transform=dataset.transform, source_samples=dataset.samples) + return cls(task=dataset.task, source_samples=dataset.samples) def __copy__(self) -> "SyntheticAnomalyDataset": """Return a shallow copy of the dataset object and prevents cleanup when original object is deleted.""" From c748a0d11f86fff5da6e5cda08c61909cb425e4d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 9 Oct 2024 12:54:55 +0100 Subject: [PATCH 05/59] Remove setup_transforms from Engine Signed-off-by: Samet Akcay --- src/anomalib/engine/engine.py | 63 +---------------------------------- 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/src/anomalib/engine/engine.py b/src/anomalib/engine/engine.py index e7612e6e57..c0de7dbea1 100644 --- a/src/anomalib/engine/engine.py +++ b/src/anomalib/engine/engine.py @@ -8,7 +8,6 @@ from pathlib import Path from typing import Any -import torch from lightning.pytorch.callbacks import Callback from lightning.pytorch.loggers import Logger from lightning.pytorch.trainer import Trainer @@ -302,60 +301,6 @@ def _setup_dataset_task( ) data.task = self.task - @staticmethod - def _setup_transform( - model: AnomalyModule, - datamodule: AnomalibDataModule | None = None, - dataloaders: EVAL_DATALOADERS | TRAIN_DATALOADERS | None = None, - ckpt_path: Path | str | None = None, - ) -> None: - """Implements the logic for setting the transform at the start of each run. - - Any transform passed explicitly to the datamodule takes precedence. Otherwise, if a checkpoint path is provided, - we can load the transform from the checkpoint. If no transform is provided, we use the default transform from - the model. - - Args: - model (AnomalyModule): The model to assign the transform to. - datamodule (AnomalibDataModule | None): The datamodule to assign the transform from. - defaults to ``None``. - dataloaders (EVAL_DATALOADERS | TRAIN_DATALOADERS | None): Dataloaders to assign the transform to. - defaults to ``None``. - ckpt_path (str): The path to the checkpoint. - defaults to ``None``. - - Returns: - Transform: The transform loaded from the checkpoint. 
- """ - if isinstance(dataloaders, DataLoader): - dataloaders = [dataloaders] - - # get transform - if datamodule and datamodule.transform: - # a transform passed explicitly to the datamodule takes precedence - transform = datamodule.transform - elif dataloaders and any(getattr(dl.dataset, "transform", None) for dl in dataloaders): - # if dataloaders are provided, we use the transform from the first dataloader that has a transform - transform = next(dl.dataset.transform for dl in dataloaders if getattr(dl.dataset, "transform", None)) - elif ckpt_path is not None: - # if a checkpoint path is provided, we can load the transform from the checkpoint - checkpoint = torch.load(ckpt_path, map_location=model.device) - transform = checkpoint["transform"] - elif model.transform is None: - # if no transform is provided, we use the default transform from the model - image_size = datamodule.image_size if datamodule else None - transform = model.configure_transforms(image_size) - else: - transform = model.transform - - # update transform in model - model.set_transform(transform) - # The dataloaders don't have access to the trainer and/or model, so we need to set the transforms manually - if dataloaders: - for dataloader in dataloaders: - if not getattr(dataloader.dataset, "transform", None): - dataloader.dataset.transform = transform - def _setup_anomalib_callbacks(self, model: AnomalyModule) -> None: """Set up callbacks for the trainer.""" _callbacks: list[Callback] = [] @@ -471,7 +416,6 @@ def fit( ) self._setup_trainer(model) self._setup_dataset_task(train_dataloaders, val_dataloaders, datamodule) - self._setup_transform(model, datamodule=datamodule, ckpt_path=ckpt_path) if model.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}: # if the model is zero-shot or few-shot, we only need to run validate for normalization and thresholding self.trainer.validate(model, val_dataloaders, datamodule=datamodule, ckpt_path=ckpt_path) @@ -525,7 +469,6 @@ def validate( if model: self._setup_trainer(model) self._setup_dataset_task(dataloaders) - self._setup_transform(model or self.model, datamodule=datamodule, ckpt_path=ckpt_path) return self.trainer.validate(model, dataloaders, ckpt_path, verbose, datamodule) def test( @@ -619,7 +562,6 @@ def test( raise RuntimeError(msg) self._setup_dataset_task(dataloaders) - self._setup_transform(model or self.model, datamodule=datamodule, ckpt_path=ckpt_path) if self._should_run_validation(model or self.model, ckpt_path): logger.info("Running validation before testing to collect normalization metrics and/or thresholds.") self.trainer.validate(model, dataloaders, None, verbose=False, datamodule=datamodule) @@ -724,7 +666,6 @@ def predict( dataloaders = dataloaders or None self._setup_dataset_task(dataloaders, datamodule) - self._setup_transform(model or self.model, datamodule=datamodule, dataloaders=dataloaders, ckpt_path=ckpt_path) if self._should_run_validation(model or self.model, ckpt_path): logger.info("Running validation before predicting to collect normalization metrics and/or thresholds.") @@ -794,7 +735,6 @@ def train( test_dataloaders, datamodule, ) - self._setup_transform(model, datamodule=datamodule, ckpt_path=ckpt_path) if model.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}: # if the model is zero-shot or few-shot, we only need to run validate for normalization and thresholding self.trainer.validate(model, val_dataloaders, None, verbose=False, datamodule=datamodule) @@ -841,8 +781,7 @@ def export( Path: Path to the exported model. 
Raises: - ValueError: If Dataset, Datamodule, and transform are not provided. - TypeError: If path to the transform file is not a string or Path. + ValueError: If Dataset, Datamodule are not provided. CLI Usage: 1. To export as a torch ``.pt`` file you can run the following command. From 03a2a2e9f4a9b5a76492a514f1babb44872702de Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 9 Oct 2024 14:28:46 +0100 Subject: [PATCH 06/59] Add preprocessor to AnomalyModule and models Signed-off-by: Samet Akcay --- src/anomalib/data/predict.py | 12 +---- .../models/components/base/anomaly_module.py | 35 +++++++----- .../models/image/cfa/lightning_model.py | 7 ++- .../models/image/cflow/lightning_model.py | 4 +- .../models/image/csflow/lightning_model.py | 4 +- .../models/image/dfkde/lightning_model.py | 4 +- .../models/image/dfm/lightning_model.py | 7 ++- .../models/image/draem/lightning_model.py | 7 ++- .../models/image/dsr/lightning_model.py | 13 ++++- .../image/efficient_ad/lightning_model.py | 26 ++++----- .../models/image/fastflow/lightning_model.py | 9 +++- .../models/image/fre/lightning_model.py | 7 ++- .../models/image/ganomaly/lightning_model.py | 7 ++- .../models/image/padim/lightning_model.py | 19 ++++--- .../models/image/patchcore/lightning_model.py | 38 +++++++------ .../reverse_distillation/lightning_model.py | 7 ++- .../models/image/rkde/lightning_model.py | 7 ++- .../models/image/stfpm/lightning_model.py | 7 ++- .../models/image/uflow/lightning_model.py | 54 ++++++++++--------- .../models/image/winclip/lightning_model.py | 22 +++++--- .../models/video/ai_vad/lightning_model.py | 18 +++++-- 21 files changed, 201 insertions(+), 113 deletions(-) diff --git a/src/anomalib/data/predict.py b/src/anomalib/data/predict.py index 06c743b88f..e296a9285b 100644 --- a/src/anomalib/data/predict.py +++ b/src/anomalib/data/predict.py @@ -7,7 +7,6 @@ from pathlib import Path from torch.utils.data.dataset import Dataset -from torchvision.transforms.v2 import Transform from anomalib.data import ImageBatch, ImageItem from anomalib.data.utils import get_image_filenames, read_image @@ -18,8 +17,6 @@ class PredictDataset(Dataset): Args: path (str | Path): Path to an image or image-folder. - transform (A.Compose | None, optional): Transform object describing the transforms that are - applied to the inputs. image_size (int | tuple[int, int] | None, optional): Target image size to resize the original image. Defaults to None. 
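+
+    Examples:
+        An illustrative sketch; the path below is a placeholder for any image file
+        or folder of images:
+
+        >>> dataset = PredictDataset(path="./datasets/MVTec/bottle/test/broken_large")
+        >>> item = dataset[0]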
""" @@ -27,13 +24,11 @@ class PredictDataset(Dataset): def __init__( self, path: str | Path, - transform: Transform | None = None, image_size: int | tuple[int, int] = (256, 256), ) -> None: super().__init__() self.image_filenames = get_image_filenames(path) - self.transform = transform self.image_size = image_size def __len__(self) -> int: @@ -44,13 +39,8 @@ def __getitem__(self, index: int) -> ImageItem: """Get the image based on the `index`.""" image_filename = self.image_filenames[index] image = read_image(image_filename, as_tensor=True) - if self.transform: - image = self.transform(image) - return ImageItem( - image=image, - image_path=str(image_filename), - ) + return ImageItem(image=image, image_path=str(image_filename)) @property def collate_fn(self) -> Callable: diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index a27b77baf2..e7efece2ee 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -7,6 +7,7 @@ import logging from abc import ABC, abstractmethod from collections import OrderedDict +from collections.abc import Sequence from pathlib import Path from typing import TYPE_CHECKING, Any @@ -22,12 +23,11 @@ from anomalib.data import Batch, InferenceBatch from anomalib.metrics.threshold import Threshold from anomalib.post_processing import OneClassPostProcessor, PostProcessor +from anomalib.pre_processing import PreProcessor from .export_mixin import ExportMixin if TYPE_CHECKING: - from lightning.pytorch.callbacks import Callback - from anomalib.metrics import AnomalibMetricCollection logger = logging.getLogger(__name__) @@ -39,7 +39,11 @@ class AnomalyModule(ExportMixin, pl.LightningModule, ABC): Acts as a base class for all the Anomaly Modules in the library. """ - def __init__(self, post_processor: PostProcessor | None = None) -> None: + def __init__( + self, + pre_processor: PreProcessor | None = None, + post_processor: PostProcessor | None = None, + ) -> None: super().__init__() logger.info("Initializing %s model.", self.__class__.__name__) @@ -51,6 +55,7 @@ def __init__(self, post_processor: PostProcessor | None = None) -> None: self.image_metrics: AnomalibMetricCollection self.pixel_metrics: AnomalibMetricCollection + self.pre_processor = pre_processor or self.configure_pre_processor() self.post_processor = post_processor or self.default_post_processor() self._transform: Transform | None = None @@ -79,6 +84,10 @@ def _setup(self) -> None: initialization. """ + def configure_callbacks(self) -> Sequence[Callback] | Callback: + """Configure default callbacks for AnomalyModule.""" + return [self.pre_processor] + def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch: """Perform the forward-pass by passing input tensor to the module. @@ -183,23 +192,23 @@ def set_transform(self, transform: Transform) -> None: """Update the transform linked to the model instance.""" self._transform = transform - def configure_transforms(self, image_size: tuple[int, int] | None = None) -> Transform: # noqa: PLR6301 - """Default transforms. + def configure_pre_processor(self, image_size: tuple[int, int] | None = None) -> PreProcessor: # noqa: PLR6301 + """Configure the pre-processor. - The default transform is resize to 256x256 and normalize to ImageNet stats. Individual models can override - this method to provide custom transforms. + The default pre-processor is resize to 256x256 and normalize to ImageNet stats. 
Individual models can override + this method to provide custom transforms and pre-processing pipelines. """ logger.warning( - "No implementation of `configure_transforms` was provided in the Lightning model. Using default " + "No implementation of `configure_pre_processor` was provided in the Lightning model. Using default " "transforms from the base class. This may not be suitable for your use case. Please override " - "`configure_transforms` in your model.", + "`configure_pre_processor` in your model.", ) image_size = image_size or (256, 256) - return Compose( - [ + return PreProcessor( + transform=Compose([ Resize(image_size, antialias=True), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ], + ]), ) def default_post_processor(self) -> PostProcessor: @@ -220,7 +229,7 @@ def input_size(self) -> tuple[int, int] | None: The effective input size is the size of the input tensor after the transform has been applied. If the transform is not set, or if the transform does not change the shape of the input tensor, this method will return None. """ - transform = self.transform or self.configure_transforms() + transform = self.transform or self.configure_pre_processor() if transform is None: return None dummy_input = torch.zeros(1, 3, 1, 1) diff --git a/src/anomalib/models/image/cfa/lightning_model.py b/src/anomalib/models/image/cfa/lightning_model.py index e367762484..9f8d5818b8 100644 --- a/src/anomalib/models/image/cfa/lightning_model.py +++ b/src/anomalib/models/image/cfa/lightning_model.py @@ -17,6 +17,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import CfaLoss from .torch_model import CfaModel @@ -42,6 +43,9 @@ class Cfa(AnomalyModule): Defaults to ``3``. radius (float): Radius of the hypersphere to search the soft boundary. Defaults to ``1e-5``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
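+
+    Examples:
+        A minimal sketch of overriding the default pre-processor. The transform
+        pipeline below is only an example; any torchvision v2 transform can be used:
+
+        .. code-block:: python
+
+            from torchvision.transforms.v2 import Compose, Normalize, Resize
+
+            from anomalib.pre_processing import PreProcessor
+
+            pre_processor = PreProcessor(
+                transform=Compose([
+                    Resize((256, 256), antialias=True),
+                    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+                ]),
+            )
+            model = Cfa(pre_processor=pre_processor)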
""" def __init__( @@ -52,8 +56,9 @@ def __init__( num_nearest_neighbors: int = 3, num_hard_negative_features: int = 3, radius: float = 1e-5, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: CfaModel = CfaModel( backbone=backbone, gamma_c=gamma_c, diff --git a/src/anomalib/models/image/cflow/lightning_model.py b/src/anomalib/models/image/cflow/lightning_model.py index edb4788111..9995b747b7 100644 --- a/src/anomalib/models/image/cflow/lightning_model.py +++ b/src/anomalib/models/image/cflow/lightning_model.py @@ -24,6 +24,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .torch_model import CflowModel from .utils import get_logp, positional_encoding_2d @@ -57,6 +58,7 @@ class Cflow(AnomalyModule): def __init__( self, + pre_processor: PreProcessor | None = None, backbone: str = "wide_resnet50_2", layers: Sequence[str] = ("layer2", "layer3", "layer4"), pre_trained: bool = True, @@ -68,7 +70,7 @@ def __init__( permute_soft: bool = False, lr: float = 0.0001, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: CflowModel = CflowModel( backbone=backbone, diff --git a/src/anomalib/models/image/csflow/lightning_model.py b/src/anomalib/models/image/csflow/lightning_model.py index 3244ef7da7..00f7eb2dea 100644 --- a/src/anomalib/models/image/csflow/lightning_model.py +++ b/src/anomalib/models/image/csflow/lightning_model.py @@ -15,6 +15,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import CsFlowLoss from .torch_model import CsFlowModel @@ -44,8 +45,9 @@ def __init__( n_coupling_blocks: int = 4, clamp: int = 3, num_channels: int = 3, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.cross_conv_hidden_channels = cross_conv_hidden_channels self.n_coupling_blocks = n_coupling_blocks diff --git a/src/anomalib/models/image/dfkde/lightning_model.py b/src/anomalib/models/image/dfkde/lightning_model.py index 210242ec5f..13143d1b1b 100644 --- a/src/anomalib/models/image/dfkde/lightning_model.py +++ b/src/anomalib/models/image/dfkde/lightning_model.py @@ -14,6 +14,7 @@ from anomalib.data import Batch from anomalib.models.components import AnomalyModule, MemoryBankMixin from anomalib.models.components.classification import FeatureScalingMethod +from anomalib.pre_processing import PreProcessor from .torch_model import DfkdeModel @@ -46,8 +47,9 @@ def __init__( n_pca_components: int = 16, feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE, max_training_points: int = 40000, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model = DfkdeModel( layers=layers, diff --git a/src/anomalib/models/image/dfm/lightning_model.py b/src/anomalib/models/image/dfm/lightning_model.py index 64777fda87..104a8221fe 100644 --- a/src/anomalib/models/image/dfm/lightning_model.py +++ b/src/anomalib/models/image/dfm/lightning_model.py @@ -15,6 +15,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule, MemoryBankMixin +from anomalib.pre_processing import PreProcessor from .torch_model import DFMModel @@ 
-37,6 +38,9 @@ class Dfm(MemoryBankMixin, AnomalyModule): Defaults to ``0.97``. score_type (str, optional): Scoring type. Options are `fre` and `nll`. Defaults to ``fre``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -47,8 +51,9 @@ def __init__( pooling_kernel_size: int = 4, pca_level: float = 0.97, score_type: str = "fre", + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: DFMModel = DFMModel( backbone=backbone, diff --git a/src/anomalib/models/image/draem/lightning_model.py b/src/anomalib/models/image/draem/lightning_model.py index 1ee025d117..c125b99847 100644 --- a/src/anomalib/models/image/draem/lightning_model.py +++ b/src/anomalib/models/image/draem/lightning_model.py @@ -17,6 +17,7 @@ from anomalib.data import Batch from anomalib.data.utils import Augmenter from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import DraemLoss from .torch_model import DraemModel @@ -35,6 +36,9 @@ class Draem(AnomalyModule): anomaly_source_path (str | None): Path to folder that contains the anomaly source images. Random noise will be used if left empty. Defaults to ``None``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -43,8 +47,9 @@ def __init__( sspcab_lambda: float = 0.1, anomaly_source_path: str | None = None, beta: float | tuple[float, float] = (0.1, 1.0), + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.augmenter = Augmenter(anomaly_source_path, beta=beta) self.model = DraemModel(sspcab=enable_sspcab) diff --git a/src/anomalib/models/image/dsr/lightning_model.py b/src/anomalib/models/image/dsr/lightning_model.py index a0c41bfc66..91b4d49c97 100644 --- a/src/anomalib/models/image/dsr/lightning_model.py +++ b/src/anomalib/models/image/dsr/lightning_model.py @@ -21,6 +21,7 @@ from anomalib.models.image.dsr.anomaly_generator import DsrAnomalyGenerator from anomalib.models.image.dsr.loss import DsrSecondStageLoss, DsrThirdStageLoss from anomalib.models.image.dsr.torch_model import DsrModel +from anomalib.pre_processing import PreProcessor __all__ = ["Dsr"] @@ -39,10 +40,18 @@ class Dsr(AnomalyModule): Args: latent_anomaly_strength (float): Strength of the generated anomalies in the latent space. Defaults to 0.2 upsampling_train_ratio (float): Ratio of training steps for the upsampling module. Defaults to 0.7 + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
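+
+    Examples:
+        An illustrative sketch of stage-specific transforms; the per-stage arguments
+        are assumed from the ``PreProcessor`` introduced earlier in this series:
+
+        .. code-block:: python
+
+            from torchvision.transforms.v2 import Compose, RandomHorizontalFlip, Resize
+
+            from anomalib.pre_processing import PreProcessor
+
+            pre_processor = PreProcessor(
+                train_transform=Compose([Resize((256, 256), antialias=True), RandomHorizontalFlip(p=0.5)]),
+                val_transform=Resize((256, 256), antialias=True),
+                test_transform=Resize((256, 256), antialias=True),
+            )
+            model = Dsr(pre_processor=pre_processor)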
""" - def __init__(self, latent_anomaly_strength: float = 0.2, upsampling_train_ratio: float = 0.7) -> None: - super().__init__() + def __init__( + self, + latent_anomaly_strength: float = 0.2, + upsampling_train_ratio: float = 0.7, + pre_processor: PreProcessor | None = None, + ) -> None: + super().__init__(pre_processor=pre_processor) self.automatic_optimization = False self.upsampling_train_ratio = upsampling_train_ratio diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index 1fe6753438..3e8407b36f 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -15,12 +15,13 @@ from lightning.pytorch.utilities.types import STEP_OUTPUT from torch.utils.data import DataLoader from torchvision.datasets import ImageFolder -from torchvision.transforms.v2 import CenterCrop, Compose, Normalize, RandomGrayscale, Resize, ToTensor, Transform +from torchvision.transforms.v2 import CenterCrop, Compose, Normalize, RandomGrayscale, Resize, ToTensor from anomalib import LearningType from anomalib.data import Batch from anomalib.data.utils import DownloadInfo, download_and_extract from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .torch_model import EfficientAdModel, EfficientAdModelSize, reduce_tensor_elems @@ -58,6 +59,9 @@ class EfficientAd(AnomalyModule): pad_maps (bool): relevant if padding is set to False. In this case, pad_maps = True pads the output anomaly maps so that their size matches the size in the padding = True case. Defaults to ``True``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -69,8 +73,9 @@ def __init__( weight_decay: float = 0.00001, padding: bool = False, pad_maps: bool = True, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.imagenet_dir = Path(imagenet_dir) if not isinstance(model_size, EfficientAdModelSize): @@ -203,6 +208,13 @@ def _get_quantiles_of_maps(self, maps: list[torch.Tensor]) -> tuple[torch.Tensor qb = torch.quantile(maps_flat, q=0.995).to(self.device) return qa, qb + @staticmethod + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Default transform for EfficientAd. Imagenet normalization applied in forward.""" + image_size = image_size or (256, 256) + transform = Compose([Resize(image_size, antialias=True)]) + return PreProcessor(transform=transform) + def configure_optimizers(self) -> torch.optim.Optimizer: """Configure optimizers.""" optimizer = torch.optim.Adam( @@ -318,13 +330,3 @@ def learning_type(self) -> LearningType: LearningType: Learning type of the model. """ return LearningType.ONE_CLASS - - @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Default transform for EfficientAd. 
Imagenet normalization applied in forward.""" - image_size = image_size or (256, 256) - return Compose( - [ - Resize(image_size, antialias=True), - ], - ) diff --git a/src/anomalib/models/image/fastflow/lightning_model.py b/src/anomalib/models/image/fastflow/lightning_model.py index 577daaeb5f..f48992d156 100644 --- a/src/anomalib/models/image/fastflow/lightning_model.py +++ b/src/anomalib/models/image/fastflow/lightning_model.py @@ -15,6 +15,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import FastflowLoss from .torch_model import FastflowModel @@ -33,7 +34,10 @@ class Fastflow(AnomalyModule): conv3x3_only (bool, optinoal): Use only conv3x3 in fast_flow model. Defaults to ``False``. hidden_ratio (float, optional): Ratio to calculate hidden var channels. - Defaults to ``1.0`. + Defaults to ``1.0``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -43,8 +47,9 @@ def __init__( flow_steps: int = 8, conv3x3_only: bool = False, hidden_ratio: float = 1.0, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.backbone = backbone self.pre_trained = pre_trained diff --git a/src/anomalib/models/image/fre/lightning_model.py b/src/anomalib/models/image/fre/lightning_model.py index 20c383b128..c88a0f4de4 100755 --- a/src/anomalib/models/image/fre/lightning_model.py +++ b/src/anomalib/models/image/fre/lightning_model.py @@ -16,6 +16,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .torch_model import FREModel @@ -39,6 +40,9 @@ class Fre(AnomalyModule): latent_dim (int, optional): Reduced size of feature after applying dimensionality reduction via shallow linear autoencoder. Defaults to ``220``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -49,8 +53,9 @@ def __init__( pooling_kernel_size: int = 2, input_dim: int = 65536, latent_dim: int = 220, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: FREModel = FREModel( backbone=backbone, diff --git a/src/anomalib/models/image/ganomaly/lightning_model.py b/src/anomalib/models/image/ganomaly/lightning_model.py index 5633c003ac..84fd95738f 100644 --- a/src/anomalib/models/image/ganomaly/lightning_model.py +++ b/src/anomalib/models/image/ganomaly/lightning_model.py @@ -16,6 +16,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import DiscriminatorLoss, GeneratorLoss from .torch_model import GanomalyModel @@ -49,6 +50,9 @@ class Ganomaly(AnomalyModule): Defaults to ``0.5``. beta2 (float, optional): Adam beta2. Defaults to ``0.999``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
""" def __init__( @@ -64,8 +68,9 @@ def __init__( lr: float = 0.0002, beta1: float = 0.5, beta2: float = 0.999, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.n_features = n_features self.latent_vec_size = latent_vec_size diff --git a/src/anomalib/models/image/padim/lightning_model.py b/src/anomalib/models/image/padim/lightning_model.py index 9cd326cf83..0b0d709e20 100644 --- a/src/anomalib/models/image/padim/lightning_model.py +++ b/src/anomalib/models/image/padim/lightning_model.py @@ -10,12 +10,13 @@ import torch from lightning.pytorch.utilities.types import STEP_OUTPUT -from torchvision.transforms.v2 import Compose, Normalize, Resize, Transform +from torchvision.transforms.v2 import Compose, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule, MemoryBankMixin from anomalib.post_processing.one_class import OneClassPostProcessor +from anomalib.pre_processing import PreProcessor from .torch_model import PadimModel @@ -37,6 +38,9 @@ class Padim(MemoryBankMixin, AnomalyModule): n_features (int, optional): Number of features to retain in the dimension reduction step. Default values from the paper are available for: resnet18 (100), wide_resnet50_2 (550). Defaults to ``None``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -45,8 +49,9 @@ def __init__( layers: list[str] = ["layer1", "layer2", "layer3"], # noqa: B006 pre_trained: bool = True, n_features: int | None = None, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: PadimModel = PadimModel( backbone=backbone, @@ -125,14 +130,14 @@ def learning_type(self) -> LearningType: return LearningType.ONE_CLASS @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Default transform for Padim.""" + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Default pre-processor for Padim.""" image_size = image_size or (256, 256) - return Compose( - [ + return PreProcessor( + transform=Compose([ Resize(image_size, antialias=True), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ], + ]), ) @staticmethod diff --git a/src/anomalib/models/image/patchcore/lightning_model.py b/src/anomalib/models/image/patchcore/lightning_model.py index 6b3b76e920..f788870b43 100644 --- a/src/anomalib/models/image/patchcore/lightning_model.py +++ b/src/anomalib/models/image/patchcore/lightning_model.py @@ -12,12 +12,13 @@ import torch from lightning.pytorch.utilities.types import STEP_OUTPUT -from torchvision.transforms.v2 import CenterCrop, Compose, Normalize, Resize, Transform +from torchvision.transforms.v2 import CenterCrop, Compose, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule, MemoryBankMixin from anomalib.post_processing.one_class import OneClassPostProcessor +from anomalib.pre_processing import PreProcessor from .torch_model import PatchcoreModel @@ -38,6 +39,9 @@ class Patchcore(MemoryBankMixin, AnomalyModule): Defaults to ``0.1``. num_neighbors (int, optional): Number of nearest neighbors. Defaults to ``9``. + pre_processor (PreProcessor, optional): Pre-processor for the model. 
+ This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -47,8 +51,9 @@ def __init__( pre_trained: bool = True, coreset_sampling_ratio: float = 0.1, num_neighbors: int = 9, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: PatchcoreModel = PatchcoreModel( backbone=backbone, @@ -59,6 +64,20 @@ def __init__( self.coreset_sampling_ratio = coreset_sampling_ratio self.embeddings: list[torch.Tensor] = [] + @staticmethod + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Default transform for Padim.""" + image_size = image_size or (256, 256) + # scale center crop size proportional to image size + height, width = image_size + center_crop_size = (int(height * (224 / 256)), int(width * (224 / 256))) + transform = Compose([ + Resize(image_size, antialias=True), + CenterCrop(center_crop_size), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + return PreProcessor(transform=transform) + @staticmethod def configure_optimizers() -> None: """Configure optimizers. @@ -125,21 +144,6 @@ def learning_type(self) -> LearningType: """ return LearningType.ONE_CLASS - @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Default transform for Padim.""" - image_size = image_size or (256, 256) - # scale center crop size proportional to image size - height, width = image_size - center_crop_size = (int(height * (224 / 256)), int(width * (224 / 256))) - return Compose( - [ - Resize(image_size, antialias=True), - CenterCrop(center_crop_size), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ], - ) - @staticmethod def default_post_processor() -> OneClassPostProcessor: """Return the default post-processor for the model. diff --git a/src/anomalib/models/image/reverse_distillation/lightning_model.py b/src/anomalib/models/image/reverse_distillation/lightning_model.py index c1ba797a03..5c7d63f598 100644 --- a/src/anomalib/models/image/reverse_distillation/lightning_model.py +++ b/src/anomalib/models/image/reverse_distillation/lightning_model.py @@ -15,6 +15,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .anomaly_map import AnomalyMapGenerationMode from .loss import ReverseDistillationLoss @@ -33,6 +34,9 @@ class ReverseDistillation(AnomalyModule): Defaults to ``AnomalyMapGenerationMode.ADD``. pre_trained (bool, optional): Boolean to check whether to use a pre_trained backbone. Defaults to ``True``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
""" def __init__( @@ -41,8 +45,9 @@ def __init__( layers: Sequence[str] = ("layer1", "layer2", "layer3"), anomaly_map_mode: AnomalyMapGenerationMode = AnomalyMapGenerationMode.ADD, pre_trained: bool = True, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.backbone = backbone self.pre_trained = pre_trained diff --git a/src/anomalib/models/image/rkde/lightning_model.py b/src/anomalib/models/image/rkde/lightning_model.py index 02ad6c2564..faf39c5497 100644 --- a/src/anomalib/models/image/rkde/lightning_model.py +++ b/src/anomalib/models/image/rkde/lightning_model.py @@ -15,6 +15,7 @@ from anomalib import LearningType from anomalib.models.components import AnomalyModule, MemoryBankMixin from anomalib.models.components.classification import FeatureScalingMethod +from anomalib.pre_processing import PreProcessor from .region_extractor import RoiStage from .torch_model import RkdeModel @@ -44,6 +45,9 @@ class Rkde(MemoryBankMixin, AnomalyModule): Defaults to ``FeatureScalingMethod.SCALE``. max_training_points (int, optional): Maximum number of training points to fit the KDE model. Defaults to ``40000``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ def __init__( @@ -56,8 +60,9 @@ def __init__( n_pca_components: int = 16, feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE, max_training_points: int = 40000, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model: RkdeModel = RkdeModel( roi_stage=roi_stage, diff --git a/src/anomalib/models/image/stfpm/lightning_model.py b/src/anomalib/models/image/stfpm/lightning_model.py index 42fc3c0c3d..a13b0519b3 100644 --- a/src/anomalib/models/image/stfpm/lightning_model.py +++ b/src/anomalib/models/image/stfpm/lightning_model.py @@ -16,6 +16,7 @@ from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import STFPMLoss from .torch_model import STFPMModel @@ -31,14 +32,18 @@ class Stfpm(AnomalyModule): Defaults to ``resnet18``. layers (list[str]): Layers to extract features from the backbone CNN Defaults to ``["layer1", "layer2", "layer3"]``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
""" def __init__( self, backbone: str = "resnet18", layers: Sequence[str] = ("layer1", "layer2", "layer3"), + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model = STFPMModel( backbone=backbone, diff --git a/src/anomalib/models/image/uflow/lightning_model.py b/src/anomalib/models/image/uflow/lightning_model.py index b7368b1e4d..a0c2034c36 100644 --- a/src/anomalib/models/image/uflow/lightning_model.py +++ b/src/anomalib/models/image/uflow/lightning_model.py @@ -13,11 +13,12 @@ from lightning.pytorch.core.optimizer import LightningOptimizer from lightning.pytorch.utilities.types import STEP_OUTPUT from torch.optim.lr_scheduler import LRScheduler -from torchvision.transforms.v2 import Compose, Normalize, Resize, Transform +from torchvision.transforms.v2 import Compose, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch from anomalib.models.components import AnomalyModule +from anomalib.pre_processing import PreProcessor from .loss import UFlowLoss from .torch_model import UflowModel @@ -37,6 +38,7 @@ def __init__( affine_clamp: float = 2.0, affine_subnet_channels_ratio: float = 1.0, permute_soft: bool = False, + pre_processor: PreProcessor | None = None, ) -> None: """Uflow model. @@ -46,8 +48,11 @@ def __init__( affine_clamp (float): Affine clamp. affine_subnet_channels_ratio (float): Affine subnet channels ratio. permute_soft (bool): Whether to use soft permutation. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. """ - super().__init__() + super().__init__(pre_processor=pre_processor) self.backbone = backbone self.flow_steps = flow_steps @@ -73,17 +78,16 @@ def _setup(self) -> None: permute_soft=self.permute_soft, ) - def training_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: # noqa: ARG002 | unused arguments - """Training step.""" - z, ljd = self.model(batch.image) - loss = self.loss(z, ljd) - self.log_dict({"loss": loss}, on_step=True, on_epoch=False, prog_bar=False, logger=True) - return {"loss": loss} - - def validation_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: # noqa: ARG002 | unused arguments - """Validation step.""" - predictions = self.model(batch.image) - return batch.update(**predictions._asdict()) + @staticmethod + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Default pre-processor for UFlow.""" + if image_size is not None: + logger.warning("Image size is not used in UFlow. 
The input image size is determined by the model.") + transform = Compose([ + Resize((448, 448), antialias=True), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + return PreProcessor(transform=transform) def configure_optimizers(self) -> tuple[list[LightningOptimizer], list[LRScheduler]]: """Return optimizer and scheduler.""" @@ -103,6 +107,18 @@ def configure_optimizers(self) -> tuple[list[LightningOptimizer], list[LRSchedul ) return [optimizer], [scheduler] + def training_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: # noqa: ARG002 | unused arguments + """Training step.""" + z, ljd = self.model(batch.image) + loss = self.loss(z, ljd) + self.log_dict({"loss": loss}, on_step=True, on_epoch=False, prog_bar=False, logger=True) + return {"loss": loss} + + def validation_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: # noqa: ARG002 | unused arguments + """Validation step.""" + predictions = self.model(batch.image) + return batch.update(**predictions._asdict()) + @property def trainer_arguments(self) -> dict[str, Any]: """Return EfficientAD trainer arguments.""" @@ -116,15 +132,3 @@ def learning_type(self) -> LearningType: LearningType: Learning type of the model. """ return LearningType.ONE_CLASS - - @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Default transform for Padim.""" - if image_size is not None: - logger.warning("Image size is not used in UFlow. The input image size is determined by the model.") - return Compose( - [ - Resize((448, 448), antialias=True), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ], - ) diff --git a/src/anomalib/models/image/winclip/lightning_model.py b/src/anomalib/models/image/winclip/lightning_model.py index 222d887017..0a5ab24be7 100644 --- a/src/anomalib/models/image/winclip/lightning_model.py +++ b/src/anomalib/models/image/winclip/lightning_model.py @@ -13,13 +13,14 @@ import torch from torch.utils.data import DataLoader -from torchvision.transforms.v2 import Compose, InterpolationMode, Normalize, Resize, Transform +from torchvision.transforms.v2 import Compose, InterpolationMode, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch from anomalib.data.predict import PredictDataset from anomalib.models.components import AnomalyModule from anomalib.post_processing import OneClassPostProcessor +from anomalib.pre_processing import PreProcessor from .torch_model import WinClipModel @@ -40,6 +41,9 @@ class WinClip(AnomalyModule): Defaults to ``(2, 3)``. few_shot_source (str | Path, optional): Path to a folder of reference images used for few-shot inference. Defaults to ``None``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
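+
+    Examples:
+        An illustrative sketch of few-shot usage; the reference-image folder below
+        is a placeholder path:
+
+        .. code-block:: python
+
+            model = WinClip(
+                class_name="transistor",
+                k_shot=5,
+                few_shot_source="./datasets/MVTec/transistor/train/good",
+            )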
""" EXCLUDE_FROM_STATE_DICT = frozenset({"model.clip"}) @@ -50,8 +54,9 @@ def __init__( k_shot: int = 0, scales: tuple = (2, 3), few_shot_source: Path | str | None = None, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model = WinClipModel(scales=scales, apply_transform=False) self.class_name = class_name self.k_shot = k_shot @@ -74,7 +79,7 @@ def _setup(self) -> None: if self.k_shot: if self.few_shot_source: logger.info("Loading reference images from %s", self.few_shot_source) - reference_dataset = PredictDataset(self.few_shot_source, transform=self.model.transform) + reference_dataset = PredictDataset(self.few_shot_source) dataloader = DataLoader(reference_dataset, batch_size=1, shuffle=False) else: logger.info("Collecting reference images from training dataset") @@ -171,15 +176,16 @@ def load_state_dict(self, state_dict: OrderedDict[str, Any], strict: bool = True return super().load_state_dict(state_dict, strict) @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Configure the default transforms used by the model.""" + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Configure the default pre-processor used by the model.""" if image_size is not None: logger.warning("Image size is not used in WinCLIP. The input image size is determined by the model.") - return Compose( - [ + + return PreProcessor( + transform=Compose([ Resize((240, 240), antialias=True, interpolation=InterpolationMode.BICUBIC), Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)), - ], + ]), ) @staticmethod diff --git a/src/anomalib/models/video/ai_vad/lightning_model.py b/src/anomalib/models/video/ai_vad/lightning_model.py index 6b4ea8785e..7d59307a38 100644 --- a/src/anomalib/models/video/ai_vad/lightning_model.py +++ b/src/anomalib/models/video/ai_vad/lightning_model.py @@ -11,12 +11,12 @@ from typing import Any from lightning.pytorch.utilities.types import STEP_OUTPUT -from torchvision.transforms.v2 import Transform from anomalib import LearningType from anomalib.data import VideoBatch from anomalib.models.components import AnomalyModule, MemoryBankMixin from anomalib.post_processing.one_class import OneClassPostProcessor, PostProcessor +from anomalib.pre_processing import PreProcessor from .torch_model import AiVadModel @@ -59,6 +59,9 @@ class AiVad(MemoryBankMixin, AnomalyModule): Defaults to ``1``. n_neighbors_deep (int): Number of neighbors used in KNN density estimation for deep features. Defaults to ``1``. + pre_processor (PreProcessor, optional): Pre-processor for the model. + This is used to pre-process the input data before it is passed to the model. + Defaults to ``None``. 
""" def __init__( @@ -77,8 +80,9 @@ def __init__( n_components_velocity: int = 2, n_neighbors_pose: int = 1, n_neighbors_deep: int = 1, + pre_processor: PreProcessor | None = None, ) -> None: - super().__init__() + super().__init__(pre_processor=pre_processor) self.model = AiVadModel( box_score_thresh=box_score_thresh, @@ -165,10 +169,14 @@ def learning_type(self) -> LearningType: return LearningType.ONE_CLASS @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform | None: - """AI-VAD does not need a transform, as the region- and feature-extractors apply their own transforms.""" + def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + """Configure the pre-processor for AI-VAD. + + AI-VAD does not need a pre-processor or transforms, as the region- and + feature-extractors apply their own transforms. + """ del image_size - return None + return PreProcessor() # A pre-processor with no transforms. @staticmethod def default_post_processor() -> PostProcessor: From 6f7399aff2186d05d21c3c9c1fc4dc435321b52d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 10 Oct 2024 15:02:07 +0100 Subject: [PATCH 07/59] Fix tests Signed-off-by: Samet Akcay --- configs/data/avenue.yaml | 3 - configs/data/btech.yaml | 3 - configs/data/folder.yaml | 3 - configs/data/kolektor.yaml | 3 - configs/data/mvtec.yaml | 3 - configs/data/mvtec_3d.yaml | 3 - configs/data/shanghaitech.yaml | 3 - configs/data/ucsd_ped.yaml | 3 - configs/data/visa.yaml | 3 - src/anomalib/data/datasets/base/depth.py | 18 +- src/anomalib/data/datasets/base/image.py | 10 +- src/anomalib/data/datasets/base/video.py | 13 +- src/anomalib/data/predict.py | 7 + src/anomalib/data/utils/synthetic.py | 10 +- .../models/components/base/anomaly_module.py | 3 +- .../models/components/base/export_mixin.py | 10 +- .../models/image/winclip/lightning_model.py | 13 +- .../data/datamodule/depth/test_folder_3d.py | 1 - .../data/datamodule/depth/test_mvtec_3d.py | 1 - .../unit/data/datamodule/image/test_btech.py | 1 - .../data/datamodule/image/test_kolektor.py | 1 - tests/unit/data/datamodule/image/test_visa.py | 1 - .../unit/data/datamodule/video/test_avenue.py | 1 - .../datamodule/video/test_shanghaitech.py | 1 - .../data/datamodule/video/test_ucsdped.py | 1 - tests/unit/engine/test_setup_transform.py | 260 ------------------ 26 files changed, 60 insertions(+), 319 deletions(-) delete mode 100644 tests/unit/engine/test_setup_transform.py diff --git a/configs/data/avenue.yaml b/configs/data/avenue.yaml index 396a9ba6b5..8fb07660ce 100644 --- a/configs/data/avenue.yaml +++ b/configs/data/avenue.yaml @@ -8,9 +8,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: from_test val_split_ratio: 0.5 seed: null diff --git a/configs/data/btech.yaml b/configs/data/btech.yaml index 22bfd0d8fe..9aa030540c 100644 --- a/configs/data/btech.yaml +++ b/configs/data/btech.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/folder.yaml b/configs/data/folder.yaml index 329fba6520..76be1382a7 100644 --- a/configs/data/folder.yaml +++ b/configs/data/folder.yaml @@ -12,9 +12,6 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null 
test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/kolektor.yaml b/configs/data/kolektor.yaml index 1b2e6fe6b4..5daec435e4 100644 --- a/configs/data/kolektor.yaml +++ b/configs/data/kolektor.yaml @@ -4,9 +4,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/mvtec.yaml b/configs/data/mvtec.yaml index 7728808ece..5fb206e144 100644 --- a/configs/data/mvtec.yaml +++ b/configs/data/mvtec.yaml @@ -6,9 +6,6 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/mvtec_3d.yaml b/configs/data/mvtec_3d.yaml index d880f92f8f..f567f80899 100644 --- a/configs/data/mvtec_3d.yaml +++ b/configs/data/mvtec_3d.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/shanghaitech.yaml b/configs/data/shanghaitech.yaml index be4da54311..d18e7671dc 100644 --- a/configs/data/shanghaitech.yaml +++ b/configs/data/shanghaitech.yaml @@ -8,9 +8,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/ucsd_ped.yaml b/configs/data/ucsd_ped.yaml index 009a5ef224..1226e4f149 100644 --- a/configs/data/ucsd_ped.yaml +++ b/configs/data/ucsd_ped.yaml @@ -8,9 +8,6 @@ init_args: train_batch_size: 8 eval_batch_size: 1 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/visa.yaml b/configs/data/visa.yaml index c5656a2158..0d94e82fa4 100644 --- a/configs/data/visa.yaml +++ b/configs/data/visa.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/src/anomalib/data/datasets/base/depth.py b/src/anomalib/data/datasets/base/depth.py index cc913f3eaa..56460b3a6a 100644 --- a/src/anomalib/data/datasets/base/depth.py +++ b/src/anomalib/data/datasets/base/depth.py @@ -9,6 +9,7 @@ import torch from PIL import Image from torchvision.transforms.functional import to_tensor +from torchvision.transforms.v2 import Transform from torchvision.tv_tensors import Mask from anomalib import TaskType @@ -23,10 +24,14 @@ class AnomalibDepthDataset(AnomalibDataset, ABC): Args: task (str): Task type, either 'classification' or 'segmentation' + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. """ - def __init__(self, task: TaskType) -> None: - super().__init__(task) + def __init__(self, task: TaskType, transform: Transform | None = None) -> None: + super().__init__(task, transform) + + self.transform = transform def __getitem__(self, index: int) -> DepthItem: """Return rgb image, depth image and mask. 
@@ -47,7 +52,9 @@ def __getitem__(self, index: int) -> DepthItem: item = {"image_path": image_path, "depth_path": depth_path, "label": label_index} if self.task == TaskType.CLASSIFICATION: - item["image"], item["depth_image"] = image, depth_image + item["image"], item["depth_image"] = ( + self.transform(image, depth_image) if self.transform else (image, depth_image) + ) elif self.task == TaskType.SEGMENTATION: # Only Anomalous (1) images have masks in anomaly datasets # Therefore, create empty mask for Normal (0) images. @@ -56,8 +63,11 @@ def __getitem__(self, index: int) -> DepthItem: if label_index == LabelName.NORMAL else Mask(to_tensor(Image.open(mask_path)).squeeze()) ) - item["image"], item["depth_image"], item["mask"] = image, depth_image, mask + item["image"], item["depth_image"], item["mask"] = ( + self.transform(image, depth_image, mask) if self.transform else (image, depth_image, mask) + ) item["mask_path"] = mask_path + else: msg = f"Unknown task type: {self.task}" raise ValueError(msg) diff --git a/src/anomalib/data/datasets/base/image.py b/src/anomalib/data/datasets/base/image.py index ade50c00ff..5aaabc8fe4 100644 --- a/src/anomalib/data/datasets/base/image.py +++ b/src/anomalib/data/datasets/base/image.py @@ -13,6 +13,7 @@ import torch from pandas import DataFrame from torch.utils.data import Dataset +from torchvision.transforms.v2 import Transform from torchvision.tv_tensors import Mask from anomalib import TaskType @@ -57,11 +58,14 @@ class AnomalibDataset(Dataset, ABC): Args: task (str): Task type, either 'classification' or 'segmentation' + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. """ - def __init__(self, task: TaskType | str) -> None: + def __init__(self, task: TaskType | str, transform: Transform | None = None) -> None: super().__init__() self.task = TaskType(task) + self.transform = transform self._samples: DataFrame | None = None self._category: str | None = None @@ -166,7 +170,7 @@ def __getitem__(self, index: int) -> DatasetItem: item = {"image_path": image_path, "gt_label": label_index} if self.task == TaskType.CLASSIFICATION: - item["image"] = image + item["image"] = self.transform(image) if self.transform else image elif self.task == TaskType.SEGMENTATION: # Only Anomalous (1) images have masks in anomaly datasets # Therefore, create empty mask for Normal (0) images. @@ -175,7 +179,7 @@ def __getitem__(self, index: int) -> DatasetItem: if label_index == LabelName.NORMAL else read_mask(mask_path, as_tensor=True) ) - item["image"], item["gt_mask"] = image, mask + item["image"], item["gt_mask"] = self.transform(image, mask) if self.transform else (image, mask) else: msg = f"Unknown task type: {self.task}" diff --git a/src/anomalib/data/datasets/base/video.py b/src/anomalib/data/datasets/base/video.py index 5ab1193d7c..3ba8f2fd83 100644 --- a/src/anomalib/data/datasets/base/video.py +++ b/src/anomalib/data/datasets/base/video.py @@ -9,7 +9,9 @@ import torch from pandas import DataFrame +from torchvision.transforms.v2 import Transform from torchvision.transforms.v2.functional import to_dtype, to_dtype_video +from torchvision.tv_tensors import Mask from anomalib import TaskType from anomalib.data.dataclasses import VideoBatch, VideoItem @@ -37,6 +39,8 @@ class AnomalibVideoDataset(AnomalibDataset, ABC): task (str): Task type, either 'classification' or 'segmentation' clip_length_in_frames (int): Number of video frames in each clip. 
frames_between_clips (int): Number of frames between each consecutive video clip. + transform (Transform, optional): Transforms that should be applied to the input clips. + Defaults to ``None``. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. Defaults to ``VideoTargetFrame.LAST``. """ @@ -46,12 +50,14 @@ def __init__( task: TaskType, clip_length_in_frames: int, frames_between_clips: int, + transform: Transform | None = None, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.clip_length_in_frames = clip_length_in_frames self.frames_between_clips = frames_between_clips + self.transform = transform self.indexer: ClipsIndexer | None = None self.indexer_cls: Callable | None = None @@ -147,8 +153,13 @@ def __getitem__(self, index: int) -> VideoItem: # include the untransformed image for visualization item.original_image = to_dtype(item.image, torch.uint8, scale=True) + # apply transforms if item.gt_mask is not None: + if self.transform: + item.image, item.gt_mask = self.transform(item.image, Mask(item.gt_mask)) item.gt_label = torch.Tensor([1 in frame for frame in item.gt_mask]).int().squeeze(0) + elif self.transform: + item.image = self.transform(item.image) # squeeze temporal dimensions in case clip length is 1 item.image = item.image.squeeze(0) diff --git a/src/anomalib/data/predict.py b/src/anomalib/data/predict.py index e296a9285b..645c5576b4 100644 --- a/src/anomalib/data/predict.py +++ b/src/anomalib/data/predict.py @@ -7,6 +7,7 @@ from pathlib import Path from torch.utils.data.dataset import Dataset +from torchvision.transforms.v2 import Transform from anomalib.data import ImageBatch, ImageItem from anomalib.data.utils import get_image_filenames, read_image @@ -17,6 +18,8 @@ class PredictDataset(Dataset): Args: path (str | Path): Path to an image or image-folder. + transform (A.Compose | None, optional): Transform object describing the transforms that are + applied to the inputs. image_size (int | tuple[int, int] | None, optional): Target image size to resize the original image. Defaults to None. """ @@ -24,11 +27,13 @@ class PredictDataset(Dataset): def __init__( self, path: str | Path, + transform: Transform | None = None, image_size: int | tuple[int, int] = (256, 256), ) -> None: super().__init__() self.image_filenames = get_image_filenames(path) + self.transform = transform self.image_size = image_size def __len__(self) -> int: @@ -39,6 +44,8 @@ def __getitem__(self, index: int) -> ImageItem: """Get the image based on the `index`.""" image_filename = self.image_filenames[index] image = read_image(image_filename, as_tensor=True) + if self.transform: + image = self.transform(image) return ImageItem(image=image, image_path=str(image_filename)) diff --git a/src/anomalib/data/utils/synthetic.py b/src/anomalib/data/utils/synthetic.py index c9626f81c1..16aa20d83d 100644 --- a/src/anomalib/data/utils/synthetic.py +++ b/src/anomalib/data/utils/synthetic.py @@ -16,6 +16,7 @@ import cv2 import pandas as pd from pandas import DataFrame, Series +from torchvision.transforms.v2 import Compose from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -79,7 +80,7 @@ def augment(sample: Series) -> Series: Returns: Series: DataFrame row with updated information about the augmented image. 
""" - # read image + # read and transform image image = read_image(sample.image_path, as_tensor=True) # apply anomalous perturbation aug_im, mask = augmenter.augment_batch(image.unsqueeze(0)) @@ -113,11 +114,12 @@ class SyntheticAnomalyDataset(AnomalibDataset): Args: task (str): Task type, either "classification" or "segmentation". + transform (A.Compose): Transform object describing the transforms that are applied to the inputs. source_samples (DataFrame): Normal samples to which the anomalous augmentations will be applied. """ - def __init__(self, task: TaskType, source_samples: DataFrame) -> None: - super().__init__(task) + def __init__(self, task: TaskType, transform: Compose, source_samples: DataFrame) -> None: + super().__init__(task, transform) self.source_samples = source_samples @@ -144,7 +146,7 @@ def from_dataset(cls: type["SyntheticAnomalyDataset"], dataset: AnomalibDataset) dataset (AnomalibDataset): Dataset consisting of only normal images that will be converrted to a synthetic anomalous dataset with a 50/50 normal anomalous split. """ - return cls(task=dataset.task, source_samples=dataset.samples) + return cls(task=dataset.task, transform=dataset.transform, source_samples=dataset.samples) def __copy__(self) -> "SyntheticAnomalyDataset": """Return a shallow copy of the dataset object and prevents cleanup when original object is deleted.""" diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index e7efece2ee..8a1c44ab06 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -100,7 +100,8 @@ def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch: Tensor: Output tensor from the model. """ del args, kwargs # These variables are not used. 
- batch = self.exportable_transform(batch) + if self.exportable_transform: + batch = self.exportable_transform(batch) batch = self.model(batch) return self.post_processor(batch) if self.post_processor else batch diff --git a/src/anomalib/models/components/base/export_mixin.py b/src/anomalib/models/components/base/export_mixin.py index bd44fb2a61..6dc674baa9 100644 --- a/src/anomalib/models/components/base/export_mixin.py +++ b/src/anomalib/models/components/base/export_mixin.py @@ -22,6 +22,7 @@ from anomalib.deploy.export import CompressionType, ExportType from anomalib.deploy.utils import make_transform_exportable from anomalib.metrics import create_metric_collection +from anomalib.pre_processing import PreProcessor from anomalib.utils.exceptions import try_import if TYPE_CHECKING: @@ -39,8 +40,8 @@ class ExportMixin: """This mixin allows exporting models to torch and ONNX/OpenVINO.""" model: nn.Module - transform: Transform - configure_transforms: Callable + pre_processor: PreProcessor + configure_pre_processor: Callable device: torch.device def to_torch( @@ -440,9 +441,10 @@ def _get_metadata( return metadata @property - def exportable_transform(self) -> Transform: + def exportable_transform(self) -> Transform | None: """Return the exportable transform.""" - return make_transform_exportable(self.transform) + transform = self.pre_processor.test_transform + return make_transform_exportable(transform) if transform else None def _write_metadata_to_json(metadata: dict[str, Any], export_root: Path) -> None: diff --git a/src/anomalib/models/image/winclip/lightning_model.py b/src/anomalib/models/image/winclip/lightning_model.py index 0a5ab24be7..50dd43e78a 100644 --- a/src/anomalib/models/image/winclip/lightning_model.py +++ b/src/anomalib/models/image/winclip/lightning_model.py @@ -79,7 +79,7 @@ def _setup(self) -> None: if self.k_shot: if self.few_shot_source: logger.info("Loading reference images from %s", self.few_shot_source) - reference_dataset = PredictDataset(self.few_shot_source) + reference_dataset = PredictDataset(self.few_shot_source, transform=self.pre_processor.test_transform) dataloader = DataLoader(reference_dataset, batch_size=1, shuffle=False) else: logger.info("Collecting reference images from training dataset") @@ -181,12 +181,11 @@ def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PrePro if image_size is not None: logger.warning("Image size is not used in WinCLIP. 
The input image size is determined by the model.") - return PreProcessor( - transform=Compose([ - Resize((240, 240), antialias=True, interpolation=InterpolationMode.BICUBIC), - Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)), - ]), - ) + transform = Compose([ + Resize((240, 240), antialias=True, interpolation=InterpolationMode.BICUBIC), + Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711)), + ]) + return PreProcessor(val_transform=transform, test_transform=transform) @staticmethod def default_post_processor() -> OneClassPostProcessor: diff --git a/tests/unit/data/datamodule/depth/test_folder_3d.py b/tests/unit/data/datamodule/depth/test_folder_3d.py index 6ed01bfff5..9ebf82e3f2 100644 --- a/tests/unit/data/datamodule/depth/test_folder_3d.py +++ b/tests/unit/data/datamodule/depth/test_folder_3d.py @@ -29,7 +29,6 @@ def datamodule(dataset_path: Path, task_type: TaskType) -> Folder3D: normal_depth_dir="train/good/xyz", abnormal_depth_dir="test/bad/xyz", normal_test_depth_dir="test/good/xyz", - image_size=256, train_batch_size=4, eval_batch_size=4, num_workers=0, diff --git a/tests/unit/data/datamodule/depth/test_mvtec_3d.py b/tests/unit/data/datamodule/depth/test_mvtec_3d.py index 70966b7774..6a94f1b279 100644 --- a/tests/unit/data/datamodule/depth/test_mvtec_3d.py +++ b/tests/unit/data/datamodule/depth/test_mvtec_3d.py @@ -23,7 +23,6 @@ def datamodule(dataset_path: Path, task_type: TaskType) -> MVTec3D: root=dataset_path / "mvtec_3d", category="dummy", task=task_type, - image_size=256, train_batch_size=4, eval_batch_size=4, num_workers=0, diff --git a/tests/unit/data/datamodule/image/test_btech.py b/tests/unit/data/datamodule/image/test_btech.py index cf7b207e1d..2f483da7c8 100644 --- a/tests/unit/data/datamodule/image/test_btech.py +++ b/tests/unit/data/datamodule/image/test_btech.py @@ -23,7 +23,6 @@ def datamodule(dataset_path: Path, task_type: TaskType) -> BTech: root=dataset_path / "btech", category="dummy", task=task_type, - image_size=256, train_batch_size=4, eval_batch_size=4, ) diff --git a/tests/unit/data/datamodule/image/test_kolektor.py b/tests/unit/data/datamodule/image/test_kolektor.py index 703c3927a3..7fc061c09d 100644 --- a/tests/unit/data/datamodule/image/test_kolektor.py +++ b/tests/unit/data/datamodule/image/test_kolektor.py @@ -22,7 +22,6 @@ def datamodule(dataset_path: Path, task_type: TaskType) -> Kolektor: _datamodule = Kolektor( root=dataset_path / "kolektor", task=task_type, - image_size=256, train_batch_size=4, eval_batch_size=4, ) diff --git a/tests/unit/data/datamodule/image/test_visa.py b/tests/unit/data/datamodule/image/test_visa.py index 0c663a6e54..8b173f38cc 100644 --- a/tests/unit/data/datamodule/image/test_visa.py +++ b/tests/unit/data/datamodule/image/test_visa.py @@ -22,7 +22,6 @@ def datamodule(dataset_path: Path, task_type: TaskType) -> Visa: _datamodule = Visa( root=dataset_path, category="dummy", - image_size=256, train_batch_size=4, eval_batch_size=4, num_workers=0, diff --git a/tests/unit/data/datamodule/video/test_avenue.py b/tests/unit/data/datamodule/video/test_avenue.py index 42365d059f..5069b93def 100644 --- a/tests/unit/data/datamodule/video/test_avenue.py +++ b/tests/unit/data/datamodule/video/test_avenue.py @@ -29,7 +29,6 @@ def datamodule(dataset_path: Path, task_type: TaskType, clip_length_in_frames: i root=dataset_path / "avenue", gt_dir=dataset_path / "avenue" / "ground_truth_demo", clip_length_in_frames=clip_length_in_frames, - image_size=256, 
task=task_type, num_workers=0, train_batch_size=4, diff --git a/tests/unit/data/datamodule/video/test_shanghaitech.py b/tests/unit/data/datamodule/video/test_shanghaitech.py index fda0d1a84d..4e96cfbaa7 100644 --- a/tests/unit/data/datamodule/video/test_shanghaitech.py +++ b/tests/unit/data/datamodule/video/test_shanghaitech.py @@ -29,7 +29,6 @@ def datamodule(dataset_path: Path, task_type: TaskType, clip_length_in_frames: i root=dataset_path / "shanghaitech", scene=1, clip_length_in_frames=clip_length_in_frames, - image_size=(256, 256), train_batch_size=4, eval_batch_size=4, num_workers=0, diff --git a/tests/unit/data/datamodule/video/test_ucsdped.py b/tests/unit/data/datamodule/video/test_ucsdped.py index 1148e9313a..669d72278a 100644 --- a/tests/unit/data/datamodule/video/test_ucsdped.py +++ b/tests/unit/data/datamodule/video/test_ucsdped.py @@ -30,7 +30,6 @@ def datamodule(dataset_path: Path, task_type: TaskType, clip_length_in_frames: i category="dummy", clip_length_in_frames=clip_length_in_frames, task=task_type, - image_size=256, train_batch_size=4, eval_batch_size=4, num_workers=0, diff --git a/tests/unit/engine/test_setup_transform.py b/tests/unit/engine/test_setup_transform.py deleted file mode 100644 index ebb60f81c0..0000000000 --- a/tests/unit/engine/test_setup_transform.py +++ /dev/null @@ -1,260 +0,0 @@ -"""Tests for the Anomalib Engine.""" - -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tempfile -from collections.abc import Generator -from pathlib import Path - -import pytest -import torch -from torch.utils.data import DataLoader -from torchvision.transforms.v2 import Resize, Transform - -from anomalib import LearningType, TaskType -from anomalib.data import AnomalibDataModule, AnomalibDataset, InferenceBatch -from anomalib.engine import Engine -from anomalib.models import AnomalyModule -from anomalib.post_processing import PostProcessor - - -class DummyDataset(AnomalibDataset): - """Dummy dataset for testing the setup_transform method.""" - - def __init__(self, transform: Transform = None) -> None: - super().__init__(TaskType.CLASSIFICATION, transform=transform) - self.image = torch.rand(3, 10, 10) - self._samples = None - - def _setup(self, _stage: str | None = None) -> None: - self._samples = None - - def __len__(self) -> int: - """Return the length of the dataset.""" - return 1 - - -class DummyPostProcessor(PostProcessor): - """Dummy post-processor for testing the setup_transform method.""" - - @staticmethod - def forward(batch: InferenceBatch) -> InferenceBatch: - """Return the batch unmodified.""" - return batch - - -class DummyModel(AnomalyModule): - """Dummy model for testing the setup_transform method.""" - - def __init__(self) -> None: - super().__init__() - self.model = torch.nn.Linear(10, 10) - - @staticmethod - def configure_transforms(image_size: tuple[int, int] | None = None) -> Transform: - """Return a Resize transform.""" - if image_size is None: - image_size = (256, 256) - return Resize(image_size) - - @staticmethod - def trainer_arguments() -> dict: - """Return an empty dictionary.""" - return {} - - @staticmethod - def learning_type() -> LearningType: - """Return the learning type.""" - return LearningType.ZERO_SHOT - - @staticmethod - def default_post_processor() -> PostProcessor: - """Return a dummy post-processor.""" - return DummyPostProcessor() - - -class DummyDataModule(AnomalibDataModule): - """Dummy datamodule for testing the setup_transform method.""" - - def __init__( - self, - transform: Transform | 
None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, - image_size: tuple[int, int] | None = None, - ) -> None: - super().__init__( - train_batch_size=1, - eval_batch_size=1, - num_workers=0, - val_split_mode="from_test", - val_split_ratio=0.5, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, - ) - - def _create_val_split(self) -> None: - pass - - def _create_test_split(self) -> None: - pass - - def _setup(self, _stage: str | None = None) -> None: - self.train_data = DummyDataset(transform=self.train_transform) - self.val_data = DummyDataset(transform=self.eval_transform) - self.test_data = DummyDataset(transform=self.eval_transform) - - -@pytest.fixture() -def checkpoint_path() -> Generator: - """Fixture to create a temporary checkpoint file that stores a Resize transform.""" - # Create a temporary file - transform = Resize((50, 50)) - with tempfile.TemporaryDirectory() as temp_dir: - file_path = Path(temp_dir) / "model.ckpt" - checkpoint = {"transform": transform} - torch.save(checkpoint, file_path) - - yield file_path - - -class TestSetupTransform: - """Tests for the `_setup_transform` method of the Anomalib Engine.""" - - # test update single dataloader - @staticmethod - def test_single_dataloader_default_transform() -> None: - """Tests if the default model transform is used when no transform is passed to the dataloader.""" - dataset = DummyDataset() - dataloader = DataLoader(dataset, batch_size=1) - model = DummyModel() - # before the setup_transform is called, the dataset should not have a transform - assert dataset.transform is None - Engine._setup_transform(model, dataloaders=dataloader) # noqa: SLF001 - # after the setup_transform is called, the dataset should have the default transform from the model - assert dataset.transform is not None - - # test update multiple dataloaders - @staticmethod - def test_multiple_dataloaders_default_transform() -> None: - """Tests if the default model transform is used when no transform is passed to the dataloader.""" - dataset = DummyDataset() - dataloader = DataLoader(dataset, batch_size=1) - model = DummyModel() - # before the setup_transform is called, the dataset should not have a transform - assert dataset.transform is None - Engine._setup_transform(model, dataloaders=[dataloader, dataloader]) # noqa: SLF001 - # after the setup_transform is called, the dataset should have the default transform from the model - assert dataset.transform is not None - - @staticmethod - def test_single_dataloader_custom_transform() -> None: - """Tests if the user-specified transform is used when passed to the dataloader.""" - transform = Transform() - dataset = DummyDataset(transform=transform) - dataloader = DataLoader(dataset, batch_size=1) - model = DummyModel() - # before the setup_transform is called, the dataset should have the custom transform - assert dataset.transform == transform - Engine._setup_transform(model, dataloaders=dataloader) # noqa: SLF001 - # after the setup_transform is called, the model should have the custom transform - assert model.transform == transform - - # test if the user-specified transform is used when passed to the datamodule - @staticmethod - def test_custom_transform() -> None: - """Tests if the user-specified transform is used when passed to the datamodule.""" - transform = Transform() - datamodule = DummyDataModule(transform=transform) - model = DummyModel() - # assert that the datamodule uses the custom transform 
before and after setup_transform is called - assert datamodule.train_transform == transform - assert datamodule.eval_transform == transform - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - assert datamodule.train_transform == transform - assert datamodule.eval_transform == transform - assert model.transform == transform - - # test if the user-specified transform is used when passed to the datamodule - @staticmethod - def test_custom_train_transform() -> None: - """Tests if the user-specified transform is used when passed to the datamodule as train_transform.""" - model = DummyModel() - transform = Transform() - datamodule = DummyDataModule(train_transform=transform) - # before calling setup, train_transform should be the custom transform and eval_transform should be None - assert datamodule.train_transform == transform - assert datamodule.eval_transform is None - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - # after calling setup, train_transform should be the custom transform and eval_transform should be the default - assert datamodule.train_transform == transform - assert datamodule.eval_transform is None - assert model.transform != transform - assert model.transform is not None - - # test if the user-specified transform is used when passed to the datamodule - @staticmethod - def test_custom_eval_transform() -> None: - """Tests if the user-specified transform is used when passed to the datamodule as eval_transform.""" - model = DummyModel() - transform = Transform() - datamodule = DummyDataModule(eval_transform=transform) - # before calling setup, train_transform should be the custom transform and eval_transform should be None - assert datamodule.train_transform is None - assert datamodule.eval_transform == transform - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - # after calling setup, train_transform should be the custom transform and eval_transform should be the default - assert datamodule.train_transform is None - assert datamodule.eval_transform == transform - assert model.transform == transform - - # test update datamodule - @staticmethod - def test_datamodule_default_transform() -> None: - """Tests if the default model transform is used when no transform is passed to the datamodule.""" - datamodule = DummyDataModule() - model = DummyModel() - # assert that the datamodule has a transform after the setup_transform is called - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - assert isinstance(model.transform, Transform) - - # test if image size is taken from datamodule - @staticmethod - def test_datamodule_image_size() -> None: - """Tests if the image size that is passed to the datamodule overwrites the default size from the model.""" - datamodule = DummyDataModule(image_size=(100, 100)) - model = DummyModel() - # assert that the datamodule has a transform after the setup_transform is called - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - assert isinstance(model.transform, Resize) - assert model.transform.size == [100, 100] - - @staticmethod - def test_transform_from_checkpoint(checkpoint_path: Path) -> None: - """Tests if the transform from the checkpoint is used.""" - model = DummyModel() - Engine._setup_transform(model, ckpt_path=checkpoint_path) # noqa: SLF001 - assert isinstance(model.transform, Resize) - assert model.transform.size == [50, 50] - - @staticmethod - def test_precendence_datamodule(checkpoint_path: Path) -> None: - """Tests if transform from the 
datamodule goes first if both checkpoint and datamodule are provided.""" - transform = Transform() - datamodule = DummyDataModule(transform=transform) - model = DummyModel() - Engine._setup_transform(model, ckpt_path=checkpoint_path, datamodule=datamodule) # noqa: SLF001 - assert model.transform == transform - - @staticmethod - def test_transform_already_assigned() -> None: - """Tests if the transform from the model is used when the model already has a transform assigned.""" - transform = Transform() - model = DummyModel() - model.set_transform(transform) - datamodule = DummyDataModule() - Engine._setup_transform(model, datamodule=datamodule) # noqa: SLF001 - assert model.transform == transform From cc5f559376a813c1e21c5659288a0655ef6706ae Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 10 Oct 2024 15:50:08 +0100 Subject: [PATCH 08/59] Remove self._transform from AnomalyModule Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 38 ++----------------- 1 file changed, 3 insertions(+), 35 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 8a1c44ab06..52ee94b74c 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -17,7 +17,7 @@ from lightning.pytorch.trainer.states import TrainerFn from lightning.pytorch.utilities.types import STEP_OUTPUT from torch import nn -from torchvision.transforms.v2 import Compose, Normalize, Resize, Transform +from torchvision.transforms.v2 import Compose, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch, InferenceBatch @@ -58,7 +58,6 @@ def __init__( self.pre_processor = pre_processor or self.configure_pre_processor() self.post_processor = post_processor or self.default_post_processor() - self._transform: Transform | None = None self._input_size: tuple[int, int] | None = None self._is_setup = False # flag to track if setup has been called from the trainer @@ -180,19 +179,6 @@ def learning_type(self) -> LearningType: """Learning type of the model.""" raise NotImplementedError - @property - def transform(self) -> Transform: - """Retrieve the model-specific transform. - - If a transform has been set using `set_transform`, it will be returned. Otherwise, we will use the - model-specific default transform, conditioned on the input size. - """ - return self._transform - - def set_transform(self, transform: Transform) -> None: - """Update the transform linked to the model instance.""" - self._transform = transform - def configure_pre_processor(self, image_size: tuple[int, int] | None = None) -> PreProcessor: # noqa: PLR6301 """Configure the pre-processor. @@ -230,30 +216,12 @@ def input_size(self) -> tuple[int, int] | None: The effective input size is the size of the input tensor after the transform has been applied. If the transform is not set, or if the transform does not change the shape of the input tensor, this method will return None. """ - transform = self.transform or self.configure_pre_processor() + transform = self.pre_processor.test_transform if transform is None: return None dummy_input = torch.zeros(1, 3, 1, 1) output_shape = transform(dummy_input).shape[-2:] - if output_shape == (1, 1): - return None - return output_shape[-2:] - - def on_save_checkpoint(self, checkpoint: dict[str, Any]) -> None: - """Called when saving the model to a checkpoint. - - Saves the transform to the checkpoint. 
- """ - checkpoint["transform"] = self.transform - - def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None: - """Called when loading the model from a checkpoint. - - Loads the transform from the checkpoint and calls setup to ensure that the torch model is built before loading - the state dict. - """ - self._transform = checkpoint["transform"] - self.setup("load_checkpoint") + return None if output_shape == (1, 1) else output_shape[-2:] @classmethod def from_config( From 4d2e110479f0c79ca1b8820e70c37be17ed57ef7 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 11 Oct 2024 11:11:56 +0100 Subject: [PATCH 09/59] revert transforms in datasets Signed-off-by: Samet Akcay --- src/anomalib/data/datasets/depth/folder_3d.py | 6 +++++- src/anomalib/data/datasets/depth/mvtec_3d.py | 6 +++++- src/anomalib/data/datasets/image/btech.py | 5 ++++- src/anomalib/data/datasets/image/folder.py | 6 +++++- src/anomalib/data/datasets/image/kolektor.py | 5 ++++- src/anomalib/data/datasets/image/mvtec.py | 6 +++++- src/anomalib/data/datasets/image/visa.py | 7 ++++++- src/anomalib/data/datasets/video/avenue.py | 5 +++++ src/anomalib/data/datasets/video/shanghaitech.py | 5 +++++ src/anomalib/data/datasets/video/ucsd_ped.py | 5 +++++ 10 files changed, 49 insertions(+), 7 deletions(-) diff --git a/src/anomalib/data/datasets/depth/folder_3d.py b/src/anomalib/data/datasets/depth/folder_3d.py index 752727e1c6..121a9b40fc 100644 --- a/src/anomalib/data/datasets/depth/folder_3d.py +++ b/src/anomalib/data/datasets/depth/folder_3d.py @@ -9,6 +9,7 @@ from pathlib import Path from pandas import DataFrame, isna +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.depth import AnomalibDepthDataset @@ -46,6 +47,8 @@ class Folder3DDataset(AnomalibDepthDataset): split (str | Split | None): Fixed subset split that follows from folder structure on file system. Choose from [Split.FULL, Split.TRAIN, Split.TEST] Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the directory. Defaults to ``None``. @@ -67,9 +70,10 @@ def __init__( abnormal_depth_dir: str | Path | None = None, normal_test_depth_dir: str | Path | None = None, split: str | Split | None = None, + transform: Transform | None = None, extensions: tuple[str, ...] | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self._name = name self.split = split diff --git a/src/anomalib/data/datasets/depth/mvtec_3d.py b/src/anomalib/data/datasets/depth/mvtec_3d.py index 82ef991a96..da2c68f86a 100644 --- a/src/anomalib/data/datasets/depth/mvtec_3d.py +++ b/src/anomalib/data/datasets/depth/mvtec_3d.py @@ -23,6 +23,7 @@ from pathlib import Path from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.depth import AnomalibDepthDataset @@ -44,6 +45,8 @@ class MVTec3DDataset(AnomalibDepthDataset): Defaults to ``"bagel"``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. 
""" def __init__( @@ -52,8 +55,9 @@ def __init__( root: Path | str = "./datasets/MVTec3D", category: str = "bagel", split: str | Split | None = None, + transform: Transform | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.root_category = Path(root) / Path(category) self.split = split diff --git a/src/anomalib/data/datasets/image/btech.py b/src/anomalib/data/datasets/image/btech.py index f51c5a4669..7fc9ef05f1 100644 --- a/src/anomalib/data/datasets/image/btech.py +++ b/src/anomalib/data/datasets/image/btech.py @@ -13,6 +13,7 @@ import pandas as pd from pandas.core.frame import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -27,6 +28,7 @@ class BTechDataset(AnomalibDataset): Args: root: Path to the BTech dataset category: Name of the BTech category. + transform: Transform to apply to the input images. split: 'train', 'val' or 'test' task: ``classification``, ``detection`` or ``segmentation`` create_validation_set: Create a validation subset in addition to the train and test subsets @@ -63,10 +65,11 @@ def __init__( self, root: str | Path, category: str, + transform: Transform | None = None, split: str | Split | None = None, task: TaskType | str = TaskType.SEGMENTATION, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.root_category = Path(root) / category self.split = split diff --git a/src/anomalib/data/datasets/image/folder.py b/src/anomalib/data/datasets/image/folder.py index 01e1986414..18e162ee1c 100644 --- a/src/anomalib/data/datasets/image/folder.py +++ b/src/anomalib/data/datasets/image/folder.py @@ -10,6 +10,7 @@ from pathlib import Path from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.image import AnomalibDataset @@ -40,6 +41,8 @@ class FolderDataset(AnomalibDataset): split (str | Split | None): Fixed subset split that follows from folder structure on file system. Choose from [Split.FULL, Split.TRAIN, Split.TEST] Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the directory. Defaults to ``None``. @@ -72,9 +75,10 @@ def __init__( normal_test_dir: str | Path | Sequence[str | Path] | None = None, mask_dir: str | Path | Sequence[str | Path] | None = None, split: str | Split | None = None, + transform: Transform | None = None, extensions: tuple[str, ...] | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self._name = name self.split = split diff --git a/src/anomalib/data/datasets/image/kolektor.py b/src/anomalib/data/datasets/image/kolektor.py index 63a9feed36..e64c65c70a 100644 --- a/src/anomalib/data/datasets/image/kolektor.py +++ b/src/anomalib/data/datasets/image/kolektor.py @@ -23,6 +23,7 @@ from cv2 import imread from pandas import DataFrame from sklearn.model_selection import train_test_split +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets import AnomalibDataset @@ -39,6 +40,7 @@ class KolektorDataset(AnomalibDataset): Defaults to ``./datasets/kolektor``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. 
+ transform (Transform | None, optional): Transforms that should be applied to the input images. """ def __init__( @@ -46,8 +48,9 @@ def __init__( task: TaskType, root: Path | str = "./datasets/kolektor", split: str | Split | None = None, + transform: Transform | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.root = root self.split = split diff --git a/src/anomalib/data/datasets/image/mvtec.py b/src/anomalib/data/datasets/image/mvtec.py index 2e7e0c2af7..64146bb5d4 100644 --- a/src/anomalib/data/datasets/image/mvtec.py +++ b/src/anomalib/data/datasets/image/mvtec.py @@ -29,6 +29,7 @@ from pathlib import Path from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base import AnomalibDataset @@ -66,6 +67,8 @@ class MVTecDataset(AnomalibDataset): Defaults to ``bottle``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. Examples: .. code-block:: python @@ -105,8 +108,9 @@ def __init__( root: Path | str = "./datasets/MVTec", category: str = "bottle", split: str | Split | None = None, + transform: Transform | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.root_category = Path(root) / Path(category) self.category = category diff --git a/src/anomalib/data/datasets/image/visa.py b/src/anomalib/data/datasets/image/visa.py index f74f2a9535..70051b9406 100644 --- a/src/anomalib/data/datasets/image/visa.py +++ b/src/anomalib/data/datasets/image/visa.py @@ -21,6 +21,8 @@ from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datasets import AnomalibDataset from anomalib.data.datasets.image.mvtec import make_mvtec_dataset @@ -52,6 +54,8 @@ class VisaDataset(AnomalibDataset): category (str): Sub-category of the dataset, e.g. 'candle' split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. Examples: To create a Visa dataset for classification: @@ -100,8 +104,9 @@ def __init__( root: str | Path, category: str, split: str | Split | None = None, + transform: Transform | None = None, ) -> None: - super().__init__(task) + super().__init__(task, transform) self.root_category = Path(root) / category self.split = split diff --git a/src/anomalib/data/datasets/video/avenue.py b/src/anomalib/data/datasets/video/avenue.py index 21c9f49a22..84caebb044 100644 --- a/src/anomalib/data/datasets/video/avenue.py +++ b/src/anomalib/data/datasets/video/avenue.py @@ -20,6 +20,7 @@ import scipy import torch from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -46,6 +47,8 @@ class AvenueDataset(AnomalibVideoDataset): Defaults to ``1``. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. Defaults to ``VideoTargetFrame.LAST``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. 
Examples: To create an Avenue dataset to train a classification model: @@ -108,12 +111,14 @@ def __init__( clip_length_in_frames: int = 2, frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, + transform: Transform | None = None, ) -> None: super().__init__( task=task, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, + transform=transform, ) self.root = root if isinstance(root, Path) else Path(root) diff --git a/src/anomalib/data/datasets/video/shanghaitech.py b/src/anomalib/data/datasets/video/shanghaitech.py index 12e8dd985b..c49bb32332 100644 --- a/src/anomalib/data/datasets/video/shanghaitech.py +++ b/src/anomalib/data/datasets/video/shanghaitech.py @@ -23,6 +23,7 @@ import pandas as pd import torch from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -41,6 +42,8 @@ class ShanghaiTechDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. """ def __init__( @@ -52,12 +55,14 @@ def __init__( clip_length_in_frames: int = 2, frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, + transform: Transform | None = None, ) -> None: super().__init__( task=task, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, + transform=transform, ) self.root = Path(root) diff --git a/src/anomalib/data/datasets/video/ucsd_ped.py b/src/anomalib/data/datasets/video/ucsd_ped.py index feeda8ff7f..00fa3ba8ca 100644 --- a/src/anomalib/data/datasets/video/ucsd_ped.py +++ b/src/anomalib/data/datasets/video/ucsd_ped.py @@ -9,6 +9,7 @@ import numpy as np import torch from pandas import DataFrame +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datasets.base.video import AnomalibVideoDataset, VideoTargetFrame @@ -32,6 +33,8 @@ class UCSDpedDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. 
""" def __init__( @@ -43,12 +46,14 @@ def __init__( clip_length_in_frames: int = 2, frames_between_clips: int = 10, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, + transform: Transform | None = None, ) -> None: super().__init__( task=task, clip_length_in_frames=clip_length_in_frames, frames_between_clips=frames_between_clips, target_frame=target_frame, + transform=transform, ) self.root_category = Path(root) / category From 1e83e573d10fd2801caba4d62a2c14b809630bdf Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 11 Oct 2024 11:12:24 +0100 Subject: [PATCH 10/59] fix efficient_ad and engine.config tests Signed-off-by: Samet Akcay --- .../models/image/efficient_ad/lightning_model.py | 9 ++++++--- tests/unit/engine/test_engine.py | 4 ---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index 3e8407b36f..b5a7c6a809 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -255,9 +255,12 @@ def on_train_start(self) -> None: if self.trainer.datamodule.train_batch_size != 1: msg = "train_batch_size for EfficientAd should be 1." raise ValueError(msg) - if self._transform and any(isinstance(transform, Normalize) for transform in self._transform.transforms): - msg = "Transforms for EfficientAd should not contain Normalize." - raise ValueError(msg) + + if self.pre_processor.train_transform: + transforms = self.pre_processor.train_transform.transforms + if transforms and any(isinstance(transform, Normalize) for transform in transforms): + msg = "Transforms for EfficientAd should not contain Normalize." + raise ValueError(msg) sample = next(iter(self.trainer.train_dataloader)) image_size = sample.image.shape[-2:] diff --git a/tests/unit/engine/test_engine.py b/tests/unit/engine/test_engine.py index 1c2e157a05..eabe9e6649 100644 --- a/tests/unit/engine/test_engine.py +++ b/tests/unit/engine/test_engine.py @@ -90,10 +90,6 @@ def fxt_full_config_path(tmp_path: Path) -> Path: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - image_size: null - transform: null - train_transform: null - eval_transform: null test_split_mode: FROM_DIR test_split_ratio: 0.2 val_split_mode: SAME_AS_TEST From 1e05349d8854e61841da5bd102594bd1fbe03f1e Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 11 Oct 2024 12:49:47 +0100 Subject: [PATCH 11/59] Update the upgrade tests Signed-off-by: Samet Akcay --- tests/integration/tools/upgrade/expected_draem_v1.yaml | 7 +------ tools/upgrade/config.py | 4 ---- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/tests/integration/tools/upgrade/expected_draem_v1.yaml b/tests/integration/tools/upgrade/expected_draem_v1.yaml index a965186c90..7084d6e464 100644 --- a/tests/integration/tools/upgrade/expected_draem_v1.yaml +++ b/tests/integration/tools/upgrade/expected_draem_v1.yaml @@ -3,16 +3,10 @@ data: init_args: root: ./datasets/MVTec category: bottle - image_size: - - 256 - - 256 train_batch_size: 72 eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test @@ -27,6 +21,7 @@ model: beta: - 0.1 - 1.0 + pre_processor: null normalization: normalization_method: min_max metrics: diff --git a/tools/upgrade/config.py b/tools/upgrade/config.py index 71bf17a4b5..5f1f3278e1 100644 --- a/tools/upgrade/config.py +++ 
b/tools/upgrade/config.py @@ -27,7 +27,6 @@ import yaml from anomalib.models import convert_snake_to_pascal_case -from anomalib.utils.config import to_tuple def get_class_signature(module_path: str, class_name: str) -> inspect.Signature: @@ -144,9 +143,6 @@ def upgrade_data_config(self) -> dict[str, Any]: self.old_config["dataset"], ) - # Input size is a list in the old config, convert it to a tuple - init_args["image_size"] = to_tuple(init_args["image_size"]) - return { "data": { "class_path": class_path, From 785d64f732379efde3369a7b96e598e0a6d778a4 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 11 Oct 2024 16:49:30 +0100 Subject: [PATCH 12/59] Revert on_load_checkpoint hook to AnomalyModule Signed-off-by: Samet Akcay --- src/anomalib/models/components/base/anomaly_module.py | 11 ++++++++++- tests/integration/model/test_models.py | 5 +---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 52ee94b74c..9f26868880 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -83,6 +83,15 @@ def _setup(self) -> None: initialization. """ + def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None: + """Called when loading a checkpoint. + + This method is called to ensure that the `TorchModel` is built before + loading the state dict. + """ + del checkpoint # `checkpoint` variable is not used. + self.setup(stage="load_checkpoint") + def configure_callbacks(self) -> Sequence[Callback] | Callback: """Configure default callbacks for AnomalyModule.""" return [self.pre_processor] @@ -216,7 +225,7 @@ def input_size(self) -> tuple[int, int] | None: The effective input size is the size of the input tensor after the transform has been applied. If the transform is not set, or if the transform does not change the shape of the input tensor, this method will return None. """ - transform = self.pre_processor.test_transform + transform = self.pre_processor.train_transform if transform is None: return None dummy_input = torch.zeros(1, 3, 1, 1) diff --git a/tests/integration/model/test_models.py b/tests/integration/model/test_models.py index 9c344976f0..bf2883d997 100644 --- a/tests/integration/model/test_models.py +++ b/tests/integration/model/test_models.py @@ -187,12 +187,9 @@ def _get_objects( extra_args = {} if model_name in {"rkde", "dfkde"}: extra_args["n_pca_components"] = 2 + if model_name == "ai_vad": pytest.skip("Revisit AI-VAD test") - - # select dataset - elif model_name == "win_clip": - dataset = MVTec(root=dataset_path / "mvtec", category="dummy", image_size=240, task=task_type) else: # EfficientAd requires that the batch size be lesser than the number of images in the dataset. # This is so that the LR step size is not 0. 
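Taken together, the patches up to this point establish the new wiring: AnomalyModule.__init__ accepts a pre_processor, falls back to configure_pre_processor() when none is given, and registers it as a Lightning callback through configure_callbacks(), so the PreProcessor's stage hooks transform each batch. A minimal sketch of passing a custom pre-processor to a model; the choice of Padim and the transform values are illustrative assumptions, not taken from these patches:

    from torchvision.transforms.v2 import Compose, Normalize, Resize

    from anomalib.models import Padim  # assumed example; any AnomalyModule subclass
    from anomalib.pre_processing import PreProcessor

    # One transform shared by train/val/test; stage-specific transforms could be
    # passed via train_transform / val_transform / test_transform instead.
    pre_processor = PreProcessor(
        transform=Compose([
            Resize((256, 256), antialias=True),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]),
    )

    # configure_callbacks() returns [self.pre_processor], so the PreProcessor's
    # on_*_batch_start hooks apply the matching transform for each stage.
    model = Padim(pre_processor=pre_processor)
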
From b7982436a0d39e724d1450840208d1bb127c4d0b Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 15 Oct 2024 05:58:56 +0100 Subject: [PATCH 13/59] Remove exportable transform from anomaly module and move to pre-processor Signed-off-by: Samet Akcay --- src/anomalib/deploy/utils.py | 4 +++- src/anomalib/models/components/base/anomaly_module.py | 2 -- src/anomalib/models/components/base/export_mixin.py | 8 -------- src/anomalib/pre_processing/pre_processing.py | 7 ++++--- 4 files changed, 7 insertions(+), 14 deletions(-) diff --git a/src/anomalib/deploy/utils.py b/src/anomalib/deploy/utils.py index e2f23bf841..30131e82c9 100644 --- a/src/anomalib/deploy/utils.py +++ b/src/anomalib/deploy/utils.py @@ -8,11 +8,13 @@ from anomalib.data.transforms import ExportableCenterCrop -def make_transform_exportable(transform: Transform) -> Transform: +def get_exportable_transform(transform: Transform | None) -> Transform | None: """Get exportable transform. Some transforms are not supported by ONNX/OpenVINO, so we need to replace them with exportable versions. """ + if transform is None: + return None transform = disable_antialiasing(transform) return convert_centercrop(transform) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 9f26868880..7be1a39601 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -108,8 +108,6 @@ def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch: Tensor: Output tensor from the model. """ del args, kwargs # These variables are not used. - if self.exportable_transform: - batch = self.exportable_transform(batch) batch = self.model(batch) return self.post_processor(batch) if self.post_processor else batch diff --git a/src/anomalib/models/components/base/export_mixin.py b/src/anomalib/models/components/base/export_mixin.py index 6dc674baa9..78a43270a8 100644 --- a/src/anomalib/models/components/base/export_mixin.py +++ b/src/anomalib/models/components/base/export_mixin.py @@ -15,12 +15,10 @@ from lightning.pytorch import LightningModule from torch import nn from torchmetrics import Metric -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data import AnomalibDataModule from anomalib.deploy.export import CompressionType, ExportType -from anomalib.deploy.utils import make_transform_exportable from anomalib.metrics import create_metric_collection from anomalib.pre_processing import PreProcessor from anomalib.utils.exceptions import try_import @@ -440,12 +438,6 @@ def _get_metadata( return metadata - @property - def exportable_transform(self) -> Transform | None: - """Return the exportable transform.""" - transform = self.pre_processor.test_transform - return make_transform_exportable(transform) if transform else None - def _write_metadata_to_json(metadata: dict[str, Any], export_root: Path) -> None: """Write metadata to json file. 
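The pre-processing change that follows wraps each stage transform with get_exportable_transform at construction time, so transforms that ONNX/OpenVINO do not support (antialiased resize, torchvision's CenterCrop) are swapped for exportable equivalents before they are applied. A small sketch of the helper on its own; the example transform values are assumptions, not from the patch:

    from torchvision.transforms.v2 import CenterCrop, Compose, Resize

    from anomalib.deploy.utils import get_exportable_transform

    transform = Compose([Resize((256, 256), antialias=True), CenterCrop(224)])
    exportable = get_exportable_transform(transform)
    # Antialiasing is disabled and CenterCrop is replaced with ExportableCenterCrop;
    # passing None returns None, matching the Transform | None signature above.
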
diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index d27c3f4961..bb082a8311 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -8,6 +8,7 @@ from torchvision.transforms.v2 import Transform from anomalib.data.dataclasses.torch.base import Batch +from anomalib.deploy.utils import get_exportable_transform class PreProcessor(nn.Module, Callback): @@ -31,9 +32,9 @@ def __init__( ) raise ValueError(msg) - self.train_transform = train_transform or transform - self.val_transform = val_transform or transform - self.test_transform = test_transform or transform + self.train_transform = get_exportable_transform(train_transform or transform) + self.val_transform = get_exportable_transform(val_transform or transform) + self.test_transform = get_exportable_transform(test_transform or transform) def on_train_batch_start( self, From ea2883344ab2628cc1e101fdebae4c5c3db11e99 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 17 Oct 2024 13:43:44 +0100 Subject: [PATCH 14/59] Add pre-processor to the model graph Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 1 + .../models/components/base/export_mixin.py | 2 +- src/anomalib/pre_processing/pre_processing.py | 49 +++++++++++++------ 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 7be1a39601..bffc99ff03 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -108,6 +108,7 @@ def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch: Tensor: Output tensor from the model. """ del args, kwargs # These variables are not used. 
+ batch = self.pre_processor(batch) if self.pre_processor else batch batch = self.model(batch) return self.post_processor(batch) if self.post_processor else batch diff --git a/src/anomalib/models/components/base/export_mixin.py b/src/anomalib/models/components/base/export_mixin.py index 5fade6c73b..22beae0774 100644 --- a/src/anomalib/models/components/base/export_mixin.py +++ b/src/anomalib/models/components/base/export_mixin.py @@ -140,7 +140,7 @@ def to_onnx( dynamic_axes = ( {"input": {0: "batch_size"}, "output": {0: "batch_size"}} if input_size - else {"input": {0: "batch_size", 2: "height", 3: "weight"}, "output": {0: "batch_size"}} + else {"input": {0: "batch_size", 2: "height", 3: "width"}, "output": {0: "batch_size"}} ) onnx_path = export_root / "model.onnx" # apply pass through the model to get the output names diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index bb082a8311..d27f88e5a4 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -3,6 +3,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import torch from lightning import Callback, LightningModule, Trainer from torch import nn from torchvision.transforms.v2 import Transform @@ -32,10 +33,27 @@ def __init__( ) raise ValueError(msg) - self.train_transform = get_exportable_transform(train_transform or transform) + self.train_transform = train_transform or transform self.val_transform = get_exportable_transform(val_transform or transform) self.test_transform = get_exportable_transform(test_transform or transform) + self.current_transform = self.train_transform # Default to train transform + + def forward(self, batch: Batch | torch.Tensor) -> Batch | torch.Tensor: + """Apply transforms to the batch.""" + if self.current_transform: + if isinstance(batch, Batch): + image, gt_mask = self.current_transform(batch.image, batch.gt_mask) + batch.update(image=image, gt_mask=gt_mask) + else: + batch = self.current_transform(batch) + return batch + + def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None: + """Set the current transform to the train transform.""" + del trainer, pl_module # Unused parameters + self.current_transform = self.train_transform + def on_train_batch_start( self, trainer: Trainer, @@ -45,10 +63,12 @@ def on_train_batch_start( ) -> None: """Apply transforms to the training batch.""" del trainer, pl_module, batch_idx # Unused parameters + self(batch) - if self.train_transform: - image, gt_mask = self.train_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None: + """Set the current transform to the validation transform.""" + del trainer, pl_module # Unused parameters + self.current_transform = self.val_transform def on_validation_batch_start( self, @@ -60,10 +80,12 @@ def on_validation_batch_start( ) -> None: """Apply transforms to the validation batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters + self(batch) - if self.val_transform: - image, gt_mask = self.val_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None: + """Set the current transform to the test transform.""" + del trainer, pl_module # Unused parameters + self.current_transform = self.test_transform def on_test_batch_start( self, @@ 
-75,10 +97,12 @@ def on_test_batch_start( ) -> None: """Apply transforms to the test batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters + self(batch) - if self.test_transform: - image, gt_mask = self.test_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + def on_predict_start(self, trainer: Trainer, pl_module: LightningModule) -> None: + """Set the current transform to the test transform.""" + del trainer, pl_module # Unused parameters + self.current_transform = self.test_transform def on_predict_batch_start( self, @@ -90,7 +114,4 @@ def on_predict_batch_start( ) -> None: """Apply transforms to the predict batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - - if self.test_transform: - image, gt_mask = self.test_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + self(batch) From 78cf5161ae45ea69c31fa23354b4f3bee94a4c1e Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 17 Oct 2024 13:51:06 +0100 Subject: [PATCH 15/59] Add docstring to pre-processor class Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 53 ++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index d27f88e5a4..30f9f484f6 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -13,7 +13,58 @@ class PreProcessor(nn.Module, Callback): - """Anomalib pre-processor.""" + """Anomalib pre-processor. + + This class serves as both a PyTorch module and a Lightning callback, handling + the application of transforms to data batches during different stages of + training, validation, testing, and prediction. + + Args: + train_transform (Transform | None): Transform to apply during training. + val_transform (Transform | None): Transform to apply during validation. + test_transform (Transform | None): Transform to apply during testing. + transform (Transform | None): General transform to apply if stage-specific + transforms are not provided. + + Raises: + ValueError: If both `transform` and any of the stage-specific transforms + are provided simultaneously. + + Notes: + If only `transform` is provided, it will be used for all stages (train, val, test). + + Examples: + >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor + >>> from anomalib.pre_processing import PreProcessor + + >>> # Define transforms + >>> train_transform = Compose([Resize((224, 224)), ToTensor()]) + >>> val_transform = Compose([Resize((256, 256)), CenterCrop((224, 224)), ToTensor()]) + + >>> # Create PreProcessor with stage-specific transforms + >>> pre_processor = PreProcessor( + ... train_transform=train_transform, + ... val_transform=val_transform + ... ) + + >>> # Create PreProcessor with a single transform for all stages + >>> common_transform = Compose([Resize((224, 224)), ToTensor()]) + >>> pre_processor_common = PreProcessor(transform=common_transform) + + >>> # Use in a Lightning module + >>> class MyModel(LightningModule): + ... def __init__(self): + ... super().__init__() + ... self.pre_processor = PreProcessor(...) + ... + ... def configure_callbacks(self): + ... return [self.pre_processor] + ... + ... def training_step(self, batch, batch_idx): + ... # The pre_processor will automatically apply the correct transform + ... processed_batch = self.pre_processor(batch) + ... 
# Rest of the training step + """ def __init__( self, From 46fe7e5d9ae029be8c822811b753a1324b2ed598 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 17 Oct 2024 17:19:08 +0100 Subject: [PATCH 16/59] Fix win-clip tests Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 30f9f484f6..6f8e21ebd3 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -88,7 +88,7 @@ def __init__( self.val_transform = get_exportable_transform(val_transform or transform) self.test_transform = get_exportable_transform(test_transform or transform) - self.current_transform = self.train_transform # Default to train transform + self.current_transform = self.test_transform # Default to test transform def forward(self, batch: Batch | torch.Tensor) -> Batch | torch.Tensor: """Apply transforms to the batch.""" From f058fbbd9fb9e6e5ec5b6c724dedb177c85adf73 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 17 Oct 2024 19:38:23 +0100 Subject: [PATCH 17/59] Update notebooks Signed-off-by: Samet Akcay --- notebooks/100_datamodules/101_btech.ipynb | 1 - notebooks/100_datamodules/102_mvtec.ipynb | 1 - notebooks/100_datamodules/103_folder.ipynb | 1 - notebooks/200_models/201_fastflow.ipynb | 3 +-- notebooks/600_loggers/601_mlflow_logging.ipynb | 1 - notebooks/700_metrics/701a_aupimo.ipynb | 1 - notebooks/700_metrics/701b_aupimo_advanced_i.ipynb | 1 - notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb | 1 - 8 files changed, 1 insertion(+), 9 deletions(-) diff --git a/notebooks/100_datamodules/101_btech.ipynb b/notebooks/100_datamodules/101_btech.ipynb index ef188665e6..4e00ae5fb6 100644 --- a/notebooks/100_datamodules/101_btech.ipynb +++ b/notebooks/100_datamodules/101_btech.ipynb @@ -106,7 +106,6 @@ "btech_datamodule = BTech(\n", " root=dataset_root,\n", " category=\"01\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", diff --git a/notebooks/100_datamodules/102_mvtec.ipynb b/notebooks/100_datamodules/102_mvtec.ipynb index 4c274939d6..432c530482 100644 --- a/notebooks/100_datamodules/102_mvtec.ipynb +++ b/notebooks/100_datamodules/102_mvtec.ipynb @@ -84,7 +84,6 @@ "mvtec_datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", diff --git a/notebooks/100_datamodules/103_folder.ipynb b/notebooks/100_datamodules/103_folder.ipynb index 2f642e145a..dbe94b5cdc 100644 --- a/notebooks/100_datamodules/103_folder.ipynb +++ b/notebooks/100_datamodules/103_folder.ipynb @@ -102,7 +102,6 @@ " abnormal_dir=\"crack\",\n", " task=TaskType.SEGMENTATION,\n", " mask_dir=dataset_root / \"mask\" / \"crack\",\n", - " image_size=(256, 256),\n", ")\n", "folder_datamodule.setup()" ] diff --git a/notebooks/200_models/201_fastflow.ipynb b/notebooks/200_models/201_fastflow.ipynb index 4cf8853fb3..57d821489e 100644 --- a/notebooks/200_models/201_fastflow.ipynb +++ b/notebooks/200_models/201_fastflow.ipynb @@ -120,7 +120,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", @@ -555,7 +554,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": 
"3.11.8" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/600_loggers/601_mlflow_logging.ipynb b/notebooks/600_loggers/601_mlflow_logging.ipynb index f45a7a0e74..f487856e3b 100644 --- a/notebooks/600_loggers/601_mlflow_logging.ipynb +++ b/notebooks/600_loggers/601_mlflow_logging.ipynb @@ -197,7 +197,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=24,\n", diff --git a/notebooks/700_metrics/701a_aupimo.ipynb b/notebooks/700_metrics/701a_aupimo.ipynb index 5c5497b3b8..4ba4d38adc 100644 --- a/notebooks/700_metrics/701a_aupimo.ipynb +++ b/notebooks/700_metrics/701a_aupimo.ipynb @@ -140,7 +140,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", diff --git a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb index a785075060..80643fee99 100644 --- a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb +++ b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb @@ -164,7 +164,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", diff --git a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb index ed647ef666..0798f94e55 100644 --- a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb +++ b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb @@ -158,7 +158,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", From 84c39cd6695c1e856283a07d54034957aabe49e7 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Mon, 21 Oct 2024 13:37:29 +0100 Subject: [PATCH 18/59] Split the forward logic and move the training to model hooks Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 64 +++++++------------ 1 file changed, 24 insertions(+), 40 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 6f8e21ebd3..a7dbb57246 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -85,25 +85,19 @@ def __init__( raise ValueError(msg) self.train_transform = train_transform or transform - self.val_transform = get_exportable_transform(val_transform or transform) - self.test_transform = get_exportable_transform(test_transform or transform) - - self.current_transform = self.test_transform # Default to test transform - - def forward(self, batch: Batch | torch.Tensor) -> Batch | torch.Tensor: - """Apply transforms to the batch.""" - if self.current_transform: - if isinstance(batch, Batch): - image, gt_mask = self.current_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) - else: - batch = self.current_transform(batch) - return batch - - def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None: - """Set the current transform to the train transform.""" - del trainer, pl_module # Unused parameters - self.current_transform = self.train_transform + self.val_transform = val_transform or transform + self.test_transform = test_transform or transform + + self.exportable_transform = 
get_exportable_transform(self.test_transform) + + def forward(self, batch: torch.Tensor) -> torch.Tensor: + """Apply transforms to the batch of tensors for inference. + + This forward-pass is only used after the model is exported. + Within the Lightning training/validation/testing loops, the transforms are applied + in the `on_*_batch_start` methods. + """ + return self.exportable_transform(batch) if self.exportable_transform else batch def on_train_batch_start( self, @@ -114,12 +108,9 @@ def on_train_batch_start( ) -> None: """Apply transforms to the training batch.""" del trainer, pl_module, batch_idx # Unused parameters - self(batch) - - def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None: - """Set the current transform to the validation transform.""" - del trainer, pl_module # Unused parameters - self.current_transform = self.val_transform + if self.train_transform: + image, gt_mask = self.train_transform(batch.image, batch.gt_mask) + batch.update(image=image, gt_mask=gt_mask) def on_validation_batch_start( self, @@ -131,12 +122,9 @@ def on_validation_batch_start( ) -> None: """Apply transforms to the validation batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - self(batch) - - def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None: - """Set the current transform to the test transform.""" - del trainer, pl_module # Unused parameters - self.current_transform = self.test_transform + if self.val_transform: + image, gt_mask = self.val_transform(batch.image, batch.gt_mask) + batch.update(image=image, gt_mask=gt_mask) def on_test_batch_start( self, @@ -148,12 +136,9 @@ def on_test_batch_start( ) -> None: """Apply transforms to the test batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - self(batch) - - def on_predict_start(self, trainer: Trainer, pl_module: LightningModule) -> None: - """Set the current transform to the test transform.""" - del trainer, pl_module # Unused parameters - self.current_transform = self.test_transform + if self.test_transform: + image, gt_mask = self.test_transform(batch.image, batch.gt_mask) + batch.update(image=image, gt_mask=gt_mask) def on_predict_batch_start( self, @@ -163,6 +148,5 @@ def on_predict_batch_start( batch_idx: int, dataloader_idx: int = 0, ) -> None: - """Apply transforms to the predict batch.""" - del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - self(batch) + """Apply transforms to the predict batch, which is the same as test batch.""" + self.on_test_batch_start(trainer, pl_module, batch, batch_idx, dataloader_idx) From 6ebbb23316cecdb04d2496dd9c82b6dc1e0ae0b9 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Mon, 21 Oct 2024 17:13:24 +0100 Subject: [PATCH 19/59] Set data transforms from preprocessor Signed-off-by: Samet Akcay --- src/anomalib/data/utils/transform.py | 107 ++++++++++++++++++ src/anomalib/pre_processing/pre_processing.py | 29 +++++ 2 files changed, 136 insertions(+) create mode 100644 src/anomalib/data/utils/transform.py diff --git a/src/anomalib/data/utils/transform.py b/src/anomalib/data/utils/transform.py new file mode 100644 index 0000000000..810d76576b --- /dev/null +++ b/src/anomalib/data/utils/transform.py @@ -0,0 +1,107 @@ +"""Utility functions for data transforms.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Sequence + +from torch.utils.data import DataLoader +from torchvision.transforms.v2 import Transform + +from 
anomalib.data import AnomalibDataModule + + +def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: + """Set a transform for a specific stage in a AnomalibDataModule. + + This function allows you to set a custom transform for a specific stage (train, val, or test) + in an AnomalibDataModule. It checks if the datamodule has the corresponding dataset attribute + and if that dataset has a transform attribute, then sets the new transform. + + Args: + datamodule: The AnomalibDataModule to set the transform for. + transform: The transform to set. + stage: The stage (e.g., 'train', 'val', 'test') to set the transform for. + + Examples: + >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor + >>> from anomalib.data import MVTec + >>> from anomalib.data.utils.transform import set_datamodule_transform + + >>> # Create a datamodule + >>> datamodule = MVTec(root="path/to/dataset", category="bottle") + + >>> # Setup the datamodule (initially, there are no custom transforms) + >>> datamodule.setup() + >>> print(datamodule.train_data.transform) # Output: None or default transform + + >>> # Define a custom transform + >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) + + >>> # Set the custom transform for the training stage + >>> set_datamodule_transform(datamodule, custom_transform, "train") + + >>> # Verify that the transform has been set + >>> print(datamodule.train_data.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) + + >>> # You can also set transforms for validation and test stages + >>> set_datamodule_transform(datamodule, custom_transform, "val") + >>> set_datamodule_transform(datamodule, custom_transform, "test") + + >>> # The dataloaders will now use the custom transforms + >>> train_dataloader = datamodule.train_dataloader() + >>> val_dataloader = datamodule.val_dataloader() + >>> test_dataloader = datamodule.test_dataloader() + """ + dataset_attr = f"{stage}_data" + if hasattr(datamodule, dataset_attr): + dataset = getattr(datamodule, dataset_attr) + if hasattr(dataset, "transform"): + dataset.transform = transform + + +def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: + """Set a transform for a dataloader or list of dataloaders. + + Args: + dataloader: The dataloader(s) to set the transform for. Can be a single DataLoader, + a callable returning a DataLoader, or a list of DataLoaders. + transform: The transform to set. + + Examples: + >>> from torch.utils.data import DataLoader + >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor + >>> from anomalib.data import MVTecDataset + >>> from anomalib.data.utils.transform import set_dataloader_transform + + >>> # Create a dataset and dataloader + >>> dataset = MVTecDataset(root="./datasets/MVTec", category="bottle", task="segmentation") + >>> dataloader = DataLoader(dataset, batch_size=32) + + >>> # Define a custom transform + >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) + + >>> # Set the transform for a single DataLoader + >>> set_dataloader_transform(dataloader, custom_transform) + >>> print(dataloader.dataset.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) + + >>> # Set the transform for a list of DataLoaders + >>> dataset_bottle = MVTecDataset(root="./datasets/MVTec", category="bottle", task="segmentation") + >>> dataset_cable = MVTecDataset(root="./datasets/MVTec", category="cable", task="segmentation") + >>> dataloader_list = [ + ... 
DataLoader(dataset_bottle, batch_size=32), + ... DataLoader(dataset_cable, batch_size=32) + ... ] + >>> set_dataloader_transform(dataloader_list, custom_transform) + >>> for dl in dataloader_list: + ... print(dl.dataset.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) + """ + if isinstance(dataloader, DataLoader): + if hasattr(dataloader.dataset, "transform"): + dataloader.dataset.transform = transform + elif isinstance(dataloader, Sequence): + for dl in dataloader: + set_dataloader_transform(dl, transform) + else: + msg = f"Unsupported dataloader type: {type(dataloader)}" + raise TypeError(msg) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index a7dbb57246..5b8c8f46a1 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -5,10 +5,12 @@ import torch from lightning import Callback, LightningModule, Trainer +from lightning.pytorch.trainer.states import TrainerFn from torch import nn from torchvision.transforms.v2 import Transform from anomalib.data.dataclasses.torch.base import Batch +from anomalib.data.utils.transform import set_dataloader_transform, set_datamodule_transform from anomalib.deploy.utils import get_exportable_transform @@ -87,9 +89,36 @@ def __init__( self.train_transform = train_transform or transform self.val_transform = val_transform or transform self.test_transform = test_transform or transform + self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) + def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: + """Set the transforms for datamodule or dataloaders. + + The model-specific transforms are configured within PreProcessor and stored in + model implementation. This method sets the transforms for the datamodule or + dataloaders. + + Args: + trainer: The Lightning trainer. + pl_module: The Lightning module. + stage: The stage (e.g., 'fit', 'train', 'val', 'test', 'predict'). + """ + super().setup(trainer, pl_module, stage) + stage = TrainerFn(stage).value # This is to convert the stage to a string + stages = ["train", "val"] if stage == "fit" else [stage] + for current_stage in stages: + transform = getattr(self, f"{current_stage}_transform") + if transform: + if hasattr(trainer, "datamodule"): + set_datamodule_transform(trainer.datamodule, transform, current_stage) + elif hasattr(trainer, f"{current_stage}_dataloaders"): + set_dataloader_transform(getattr(trainer, f"{current_stage}_dataloaders"), transform) + else: + msg = f"Trainer does not have a datamodule or {current_stage}_dataloaders attribute" + raise ValueError(msg) + def forward(self, batch: torch.Tensor) -> torch.Tensor: """Apply transforms to the batch of tensors for inference. 
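Note: the interaction between the ``PreProcessor.setup()`` hook and the
``set_datamodule_transform()`` / ``set_dataloader_transform()`` helpers introduced in
this patch is easiest to see end to end with a small sketch. The snippet below is
illustrative only and is not part of any patch; it assumes the MVTec dataset is
available under ``./datasets/MVTec`` and reproduces by hand what the callback does for
the "train" stage once the Lightning Trainer calls ``setup()``.

    from torchvision.transforms.v2 import Compose, Normalize, Resize

    from anomalib.data import MVTec
    from anomalib.data.utils.transform import set_datamodule_transform
    from anomalib.pre_processing import PreProcessor

    # Stage-specific transforms; passing only ``transform`` would apply one transform to all stages.
    pre_processor = PreProcessor(
        train_transform=Compose([
            Resize((256, 256), antialias=True),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]),
        val_transform=Resize((256, 256), antialias=True),
        test_transform=Resize((256, 256), antialias=True),
    )

    datamodule = MVTec(root="./datasets/MVTec", category="bottle")
    datamodule.setup()

    # What ``PreProcessor.setup()`` does for the training stage: look up the transform
    # registered for that stage and push it onto the datamodule's train dataset.
    set_datamodule_transform(datamodule, pre_processor.train_transform, "train")
    print(datamodule.train_data.transform)  # -> the Compose defined above
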
From 1b0483a00f55e6fe36d62ed4d569ae37f67d5ab4 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Mon, 21 Oct 2024 17:17:42 +0100 Subject: [PATCH 20/59] Update the docstrings Signed-off-by: Samet Akcay --- src/anomalib/data/utils/transform.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/anomalib/data/utils/transform.py b/src/anomalib/data/utils/transform.py index 810d76576b..8593e3ff75 100644 --- a/src/anomalib/data/utils/transform.py +++ b/src/anomalib/data/utils/transform.py @@ -28,20 +28,14 @@ def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transfor >>> from anomalib.data import MVTec >>> from anomalib.data.utils.transform import set_datamodule_transform - >>> # Create a datamodule + >>> # Create a datamodule and check its transform >>> datamodule = MVTec(root="path/to/dataset", category="bottle") - - >>> # Setup the datamodule (initially, there are no custom transforms) >>> datamodule.setup() >>> print(datamodule.train_data.transform) # Output: None or default transform - >>> # Define a custom transform + >>> # Define a custom transform and set it for the training stage >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) - - >>> # Set the custom transform for the training stage >>> set_datamodule_transform(datamodule, custom_transform, "train") - - >>> # Verify that the transform has been set >>> print(datamodule.train_data.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) >>> # You can also set transforms for validation and test stages @@ -78,10 +72,8 @@ def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], tran >>> dataset = MVTecDataset(root="./datasets/MVTec", category="bottle", task="segmentation") >>> dataloader = DataLoader(dataset, batch_size=32) - >>> # Define a custom transform + >>> # Define a custom transform and set it for a single DataLoader >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) - - >>> # Set the transform for a single DataLoader >>> set_dataloader_transform(dataloader, custom_transform) >>> print(dataloader.dataset.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) From a503be11e2124febd6cd94cfa0cb4471c51402d9 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 22 Oct 2024 11:31:50 +0000 Subject: [PATCH 21/59] Get stage transforms in setup of pre-processor Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 5b8c8f46a1..4f9ff5bbc1 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -106,18 +106,25 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non stage: The stage (e.g., 'fit', 'train', 'val', 'test', 'predict'). 
""" super().setup(trainer, pl_module, stage) - stage = TrainerFn(stage).value # This is to convert the stage to a string - stages = ["train", "val"] if stage == "fit" else [stage] - for current_stage in stages: - transform = getattr(self, f"{current_stage}_transform") - if transform: - if hasattr(trainer, "datamodule"): - set_datamodule_transform(trainer.datamodule, transform, current_stage) - elif hasattr(trainer, f"{current_stage}_dataloaders"): - set_dataloader_transform(getattr(trainer, f"{current_stage}_dataloaders"), transform) - else: - msg = f"Trainer does not have a datamodule or {current_stage}_dataloaders attribute" - raise ValueError(msg) + # Get stage transform + stage = TrainerFn(stage).value # Make sure ``stage`` is a str + stage_transforms = { + "fit": self.train_transform, + "validate": self.val_transform, + "test": self.test_transform, + "predict": self.predict_transform, + } + transform = stage_transforms.get(stage) + + # Assign the transform to the datamodule or dataloaders + if transform: + if hasattr(trainer, "datamodule"): + set_datamodule_transform(trainer.datamodule, transform, stage) + elif hasattr(trainer, f"{stage}_dataloaders"): + set_dataloader_transform(getattr(trainer, f"{stage}_dataloaders"), transform) + else: + msg = f"Trainer does not have a datamodule or {stage}_dataloaders attribute" + raise ValueError(msg) def forward(self, batch: torch.Tensor) -> torch.Tensor: """Apply transforms to the batch of tensors for inference. From 427f680ba1d3e296eab20d2905ede638ab27687d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 12:39:26 +0100 Subject: [PATCH 22/59] Revert data config yaml files Signed-off-by: Samet Akcay --- configs/data/avenue.yaml | 3 +++ configs/data/btech.yaml | 3 +++ configs/data/folder.yaml | 3 +++ configs/data/kolektor.yaml | 3 +++ configs/data/mvtec.yaml | 3 +++ configs/data/mvtec_3d.yaml | 3 +++ configs/data/shanghaitech.yaml | 3 +++ configs/data/ucsd_ped.yaml | 3 +++ configs/data/visa.yaml | 3 +++ 9 files changed, 27 insertions(+) diff --git a/configs/data/avenue.yaml b/configs/data/avenue.yaml index 8fb07660ce..396a9ba6b5 100644 --- a/configs/data/avenue.yaml +++ b/configs/data/avenue.yaml @@ -8,6 +8,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null val_split_mode: from_test val_split_ratio: 0.5 seed: null diff --git a/configs/data/btech.yaml b/configs/data/btech.yaml index 9aa030540c..22bfd0d8fe 100644 --- a/configs/data/btech.yaml +++ b/configs/data/btech.yaml @@ -5,6 +5,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/folder.yaml b/configs/data/folder.yaml index 76be1382a7..329fba6520 100644 --- a/configs/data/folder.yaml +++ b/configs/data/folder.yaml @@ -12,6 +12,9 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/kolektor.yaml b/configs/data/kolektor.yaml index 5daec435e4..1b2e6fe6b4 100644 --- a/configs/data/kolektor.yaml +++ b/configs/data/kolektor.yaml @@ -4,6 +4,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 
val_split_mode: same_as_test diff --git a/configs/data/mvtec.yaml b/configs/data/mvtec.yaml index 5fb206e144..7728808ece 100644 --- a/configs/data/mvtec.yaml +++ b/configs/data/mvtec.yaml @@ -6,6 +6,9 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/mvtec_3d.yaml b/configs/data/mvtec_3d.yaml index f567f80899..d880f92f8f 100644 --- a/configs/data/mvtec_3d.yaml +++ b/configs/data/mvtec_3d.yaml @@ -5,6 +5,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/shanghaitech.yaml b/configs/data/shanghaitech.yaml index d18e7671dc..be4da54311 100644 --- a/configs/data/shanghaitech.yaml +++ b/configs/data/shanghaitech.yaml @@ -8,6 +8,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/ucsd_ped.yaml b/configs/data/ucsd_ped.yaml index 1226e4f149..009a5ef224 100644 --- a/configs/data/ucsd_ped.yaml +++ b/configs/data/ucsd_ped.yaml @@ -8,6 +8,9 @@ init_args: train_batch_size: 8 eval_batch_size: 1 num_workers: 8 + transform: null + train_transform: null + eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/visa.yaml b/configs/data/visa.yaml index 0d94e82fa4..c5656a2158 100644 --- a/configs/data/visa.yaml +++ b/configs/data/visa.yaml @@ -5,6 +5,9 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test From 2ee60ee83bdee639829490b85b2486f3ee448596 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 12:40:09 +0100 Subject: [PATCH 23/59] Revert datamodules Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 71 +++++++- .../data/datamodules/depth/folder_3d.py | 20 +++ .../data/datamodules/depth/mvtec_3d.py | 20 ++- src/anomalib/data/datamodules/image/btech.py | 23 +++ src/anomalib/data/datamodules/image/folder.py | 160 +++++++++--------- .../data/datamodules/image/kolektor.py | 20 +++ src/anomalib/data/datamodules/image/mvtec.py | 24 ++- src/anomalib/data/datamodules/image/visa.py | 19 +++ src/anomalib/data/datamodules/video/avenue.py | 19 +++ 9 files changed, 293 insertions(+), 83 deletions(-) diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index 8476bf5eeb..feb1d00c3b 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -12,6 +12,7 @@ from lightning.pytorch.trainer.states import TrainerFn from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from torch.utils.data.dataloader import DataLoader +from torchvision.transforms.v2 import Resize, Transform from anomalib.data.utils import TestSplitMode, ValSplitMode, random_split, split_by_label from anomalib.data.utils.synthetic import SyntheticAnomalyDataset @@ -31,13 +32,23 @@ class AnomalibDataModule(LightningDataModule, ABC): train_batch_size (int): Batch size used by the train dataloader. 
eval_batch_size (int): Batch size used by the val and test dataloaders. num_workers (int): Number of workers used by the train, val and test dataloaders. - val_split_mode (ValSplitMode): Determines how the validation split is obtained. + val_split_mode (ValSplitMode | str): Determines how the validation split is obtained. Options: [none, same_as_test, from_test, synthetic] val_split_ratio (float): Fraction of the train or test images held our for validation. - test_split_mode (Optional[TestSplitMode], optional): Determines how the test split is obtained. + test_split_mode (TestSplitMode | str | None, optional): Determines how the test split is obtained. Options: [none, from_dir, synthetic]. Defaults to ``None``. - test_split_ratio (float): Fraction of the train images held out for testing. + test_split_ratio (float | None, optional): Fraction of the train images held out for testing. + Defaults to ``None``. + image_size (tuple[int, int] | None, optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform | None, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform | None, optional): Transforms that should be applied to the input images + during training. + Defaults to ``None``. + eval_transform (Transform | None, optional): Transforms that should be applied to the input images + during evaluation. Defaults to ``None``. seed (int | None, optional): Seed used during random subset splitting. Defaults to ``None``. @@ -52,6 +63,10 @@ def __init__( val_split_ratio: float, test_split_mode: TestSplitMode | str | None = None, test_split_ratio: float | None = None, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, seed: int | None = None, ) -> None: super().__init__() @@ -62,8 +77,18 @@ def __init__( self.test_split_ratio = test_split_ratio self.val_split_mode = ValSplitMode(val_split_mode) self.val_split_ratio = val_split_ratio + self.image_size = image_size self.seed = seed + # set transforms + if bool(train_transform) != bool(eval_transform): + msg = "Only one of train_transform and eval_transform was specified. This is not recommended because \ + it could lead to unexpected behaviour. Please ensure training and eval transforms have the same \ + reshape and normalization characteristics." + logger.warning(msg) + self._train_transform = train_transform or transform + self._eval_transform = eval_transform or transform + self.train_data: AnomalibDataset self.val_data: AnomalibDataset self.test_data: AnomalibDataset @@ -205,6 +230,46 @@ def predict_dataloader(self) -> EVAL_DATALOADERS: """Use the test dataloader for inference unless overridden.""" return self.test_dataloader() + @property + def transform(self) -> Transform: + """Property that returns the user-specified transform for the datamodule, if any. + + This property is accessed by the engine to set the transform for the model. The eval_transform takes precedence + over the train_transform, because the transform that we store in the model is the one that should be used during + inference. + """ + if self._eval_transform: + return self._eval_transform + return None + + @property + def train_transform(self) -> Transform: + """Get the transforms that will be passed to the train dataset. + + If the train_transform is not set, the engine will request the transform from the model. 
+ """ + if self._train_transform: + return self._train_transform + if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: + return self.trainer.lightning_module.transform + if self.image_size: + return Resize(self.image_size, antialias=True) + return None + + @property + def eval_transform(self) -> Transform: + """Get the transform that will be passed to the val/test/predict datasets. + + If the eval_transform is not set, the engine will request the transform from the model. + """ + if self._eval_transform: + return self._eval_transform + if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: + return self.trainer.lightning_module.transform + if self.image_size: + return Resize(self.image_size, antialias=True) + return None + @classmethod def from_config( cls: type["AnomalibDataModule"], diff --git a/src/anomalib/data/datamodules/depth/folder_3d.py b/src/anomalib/data/datamodules/depth/folder_3d.py index 2e2930be26..cebea42d02 100644 --- a/src/anomalib/data/datamodules/depth/folder_3d.py +++ b/src/anomalib/data/datamodules/depth/folder_3d.py @@ -8,6 +8,8 @@ from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.folder_3d import Folder3DDataset @@ -49,6 +51,14 @@ class Folder3D(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -77,6 +87,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -87,6 +101,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -109,6 +127,7 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = Folder3DDataset( name=self.name, task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -124,6 +143,7 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = Folder3DDataset( name=self.name, task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/depth/mvtec_3d.py b/src/anomalib/data/datamodules/depth/mvtec_3d.py index b833643419..1e5b90e917 100644 --- a/src/anomalib/data/datamodules/depth/mvtec_3d.py +++ b/src/anomalib/data/datamodules/depth/mvtec_3d.py @@ -22,6 +22,8 @@ import logging from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.mvtec_3d import MVTec3DDataset @@ -60,9 +62,13 @@ class MVTec3D(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. + image_size (tuple[int, int], optional): Size to which input images should be resized. Defaults to ``None``. - test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. 
@@ -84,6 +90,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -94,6 +104,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -108,12 +122,14 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = MVTec3DDataset( task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTec3DDataset( task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/btech.py b/src/anomalib/data/datamodules/image/btech.py index 11bbcf387f..bd3d762cf2 100644 --- a/src/anomalib/data/datamodules/image/btech.py +++ b/src/anomalib/data/datamodules/image/btech.py @@ -14,6 +14,7 @@ from pathlib import Path import cv2 +from torchvision.transforms.v2 import Transform from tqdm import tqdm from anomalib import TaskType @@ -52,6 +53,14 @@ class BTech(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. test_split_mode (TestSplitMode, optional): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float, optional): Fraction of images from the train set that will be reserved for testing. @@ -70,9 +79,13 @@ class BTech(AnomalibDataModule): >>> datamodule = BTech( ... root="./datasets/BTech", ... category="01", + ... image_size=(256, 256), ... train_batch_size=32, ... eval_batch_size=32, ... num_workers=8, + ... transform=None, + ... train_transform=None, + ... eval_transform=None, ... 
) >>> datamodule.setup() @@ -109,6 +122,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -119,6 +136,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -133,12 +154,14 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = BTechDataset( task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = BTechDataset( task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/folder.py b/src/anomalib/data/datamodules/image/folder.py index 7fe51c32a0..e40f2322f5 100644 --- a/src/anomalib/data/datamodules/image/folder.py +++ b/src/anomalib/data/datamodules/image/folder.py @@ -9,6 +9,8 @@ from collections.abc import Sequence from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.folder import FolderDataset @@ -20,94 +22,90 @@ class Folder(AnomalibDataModule): Args: name (str): Name of the dataset. This is used to name the datamodule, especially when logging/saving. - normal_dir (str | Path | Sequence): Name of the directory containing normal images. - root (str | Path | None): Path to the root folder containing normal and abnormal dirs. - Defaults to ``None``. - abnormal_dir (str | Path | None | Sequence): Name of the directory containing abnormal images. + normal_dir (str | Path | Sequence[str | Path]): Path to the directory containing normal images. + root (str | Path | None): Path to the root folder containing normal and abnormal dirs. Defaults to ``None``. + abnormal_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing abnormal images. Defaults to ``None``. - normal_test_dir (str | Path | Sequence | None, optional): Path to the directory containing - normal images for the test dataset. + normal_test_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing + normal images for the test dataset. Defaults to ``None``. + mask_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing + the mask annotations. Defaults to ``None``. + normal_split_ratio (float): Ratio to split normal training images and add to the + test set in case test set doesn't contain any normal images. Defaults to 0.2. + extensions (tuple[str, ...] | None): Type of the image extensions to read from the + directory. Defaults to ``None``. + train_batch_size (int): Training batch size. Defaults to 32. + eval_batch_size (int): Validation, test and predict batch size. Defaults to 32. + num_workers (int): Number of workers. Defaults to 8. + task (TaskType | str): Task type. Could be ``classification``, ``detection`` or ``segmentation``. 
+ Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int] | None): Size to which input images should be resized. Defaults to ``None``. + transform (Transform | None): Transforms that should be applied to the input images. Defaults to ``None``. + train_transform (Transform | None): Transforms that should be applied to the input images during training. Defaults to ``None``. - mask_dir (str | Path | Sequence | None, optional): Path to the directory containing - the mask annotations. - Defaults to ``None``. - normal_split_ratio (float, optional): Ratio to split normal training images and add to the - test set in case test set doesn't contain any normal images. - Defaults to 0.2. - extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the - directory. + eval_transform (Transform | None): Transforms that should be applied to the input images during evaluation. Defaults to ``None``. - train_batch_size (int, optional): Training batch size. - Defaults to ``32``. - eval_batch_size (int, optional): Validation, test and predict batch size. - Defaults to ``32``. - num_workers (int, optional): Number of workers. - Defaults to ``8``. - task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. - Defaults to ``segmentation``. - test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. + test_split_mode (TestSplitMode | str): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. - Defaults to ``0.2``. - val_split_mode (ValSplitMode): Setting that determines how the validation subset is obtained. + Defaults to 0.2. + val_split_mode (ValSplitMode | str): Setting that determines how the validation subset is obtained. Defaults to ``ValSplitMode.FROM_TEST``. val_split_ratio (float): Fraction of train or test images that will be reserved for validation. - Defaults to ``0.5``. - seed (int | None, optional): Seed used during random subset splitting. - Defaults to ``None``. + Defaults to 0.5. + seed (int | None): Seed used during random subset splitting. Defaults to ``None``. Examples: - The following code demonstrates how to use the ``Folder`` datamodule. Assume that the dataset is structured - as follows: + The following code demonstrates how to use the ``Folder`` datamodule: + + >>> from pathlib import Path + >>> from anomalib.data import Folder + >>> from anomalib import TaskType + + >>> dataset_root = Path("./sample_dataset") + >>> folder_datamodule = Folder( + ... name="my_folder_dataset", + ... root=dataset_root, + ... normal_dir="good", + ... abnormal_dir="crack", + ... task=TaskType.SEGMENTATION, + ... mask_dir=dataset_root / "mask" / "crack", + ... image_size=(256, 256), + ... train_batch_size=32, + ... eval_batch_size=32, + ... num_workers=8, + ... ) + >>> folder_datamodule.setup() + + >>> # Access the training images + >>> train_dataloader = folder_datamodule.train_dataloader() + >>> batch = next(iter(train_dataloader)) + >>> print(batch.keys(), batch["image"].shape) + + >>> # Access the test images + >>> test_dataloader = folder_datamodule.test_dataloader() + >>> batch = next(iter(test_dataloader)) + >>> print(batch.keys(), batch["image"].shape) + + Note: + The dataset is expected to have a structure similar to: .. code-block:: bash - $ tree sample_dataset - sample_dataset - ├── colour - │ ├── 00.jpg - │ ├── ... 
- │ └── x.jpg - ├── crack - │ ├── 00.jpg - │ ├── ... - │ └── y.jpg - ├── good - │ ├── ... - │ └── z.jpg - ├── LICENSE - └── mask - ├── colour - │ ├── ... - │ └── x.jpg - └── crack - ├── ... - └── y.jpg - - .. code-block:: python - - folder_datamodule = Folder( - root=dataset_root, - normal_dir="good", - abnormal_dir="crack", - task=TaskType.SEGMENTATION, - mask_dir=dataset_root / "mask" / "crack", - ) - folder_datamodule.setup() - - To access the training images, - - .. code-block:: python - - >> i, data = next(enumerate(folder_datamodule.train_dataloader())) - >> print(data.keys(), data["image"].shape) - - To access the test images, - - .. code-block:: python - - >> i, data = next(enumerate(folder_datamodule.test_dataloader())) - >> print(data.keys(), data["image"].shape) + sample_dataset/ + ├── good/ + │ ├── normal_image1.jpg + │ ├── normal_image2.jpg + │ └── ... + ├── crack/ + │ ├── anomaly_image1.jpg + │ ├── anomaly_image2.jpg + │ └── ... + └── mask/ + └── crack/ + ├── anomaly_mask1.png + ├── anomaly_mask2.png + └── ... """ def __init__( @@ -124,6 +122,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -148,6 +150,10 @@ def __init__( test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, seed=seed, ) @@ -166,6 +172,7 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = FolderDataset( name=self.name, task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -178,6 +185,7 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = FolderDataset( name=self.name, task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/image/kolektor.py b/src/anomalib/data/datamodules/image/kolektor.py index fa766b7535..2f8dc3b92b 100644 --- a/src/anomalib/data/datamodules/image/kolektor.py +++ b/src/anomalib/data/datamodules/image/kolektor.py @@ -20,6 +20,8 @@ import logging from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.kolektor import KolektorDataset @@ -54,6 +56,14 @@ class Kolektor(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. 
Defaults to ``TestSplitMode.FROM_DIR`` test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -73,6 +83,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -83,6 +97,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -96,11 +114,13 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = KolektorDataset( task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, ) self.test_data = KolektorDataset( task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, ) diff --git a/src/anomalib/data/datamodules/image/mvtec.py b/src/anomalib/data/datamodules/image/mvtec.py index da23a1644a..508a582380 100644 --- a/src/anomalib/data/datamodules/image/mvtec.py +++ b/src/anomalib/data/datamodules/image/mvtec.py @@ -28,6 +28,8 @@ import logging from pathlib import Path +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.mvtec import MVTecDataset @@ -66,6 +68,14 @@ class MVTec(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -93,9 +103,9 @@ class MVTec(AnomalibDataModule): >>> datamodule = MVTec(category="cable") - To change the batch size: + To change the image and batch size: - >>> datamodule = MVTec(train_batch_size=16, eval_batch_size=8) + >>> datamodule = MVTec(image_size=(512, 512), train_batch_size=16, eval_batch_size=8) MVTec AD dataset does not provide a validation set. 
If you would like to use a separate validation set, you can use the ``val_split_mode`` and @@ -119,6 +129,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -128,6 +142,10 @@ def __init__( super().__init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, num_workers=num_workers, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, @@ -154,12 +172,14 @@ def _setup(self, _stage: str | None = None) -> None: """ self.train_data = MVTecDataset( task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTecDataset( task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/visa.py b/src/anomalib/data/datamodules/image/visa.py index 6c30e58956..30bf945c73 100644 --- a/src/anomalib/data/datamodules/image/visa.py +++ b/src/anomalib/data/datamodules/image/visa.py @@ -28,6 +28,7 @@ from pathlib import Path import cv2 +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule @@ -65,6 +66,14 @@ class Visa(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -85,6 +94,10 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -95,6 +108,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -110,12 +127,14 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = VisaDataset( task=self.task, + transform=self.train_transform, split=Split.TRAIN, root=self.split_root, category=self.category, ) self.test_data = VisaDataset( task=self.task, + transform=self.eval_transform, split=Split.TEST, root=self.split_root, category=self.category, diff --git a/src/anomalib/data/datamodules/video/avenue.py b/src/anomalib/data/datamodules/video/avenue.py index 67a0614ca1..8914475081 100644 --- a/src/anomalib/data/datamodules/video/avenue.py +++ b/src/anomalib/data/datamodules/video/avenue.py @@ -21,6 +21,7 @@ import cv2 import scipy.io +from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule @@ -63,6 +64,14 @@ class Avenue(AnomalibVideoDataModule): Defaults to ``VideoTargetFrame.LAST``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to ``32``. eval_batch_size (int, optional): Test batch size. 
@@ -132,6 +141,10 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame | str = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -143,6 +156,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -158,6 +175,7 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = AvenueDataset( task=self.task, + transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -168,6 +186,7 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = AvenueDataset( task=self.task, + transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, From 138c7e30a313bc5a454137d2f4b87312ca9c6cb0 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 12:40:28 +0100 Subject: [PATCH 24/59] Revert datasets Signed-off-by: Samet Akcay --- .../data/datamodules/video/shanghaitech.py | 20 +++++++++++++++++ .../data/datamodules/video/ucsd_ped.py | 22 +++++++++++++++++-- src/anomalib/data/datasets/depth/folder_3d.py | 7 +++--- src/anomalib/data/datasets/depth/mvtec_3d.py | 8 +++---- src/anomalib/data/datasets/image/btech.py | 6 ++++- src/anomalib/data/datasets/image/folder.py | 14 ++++++++---- src/anomalib/data/datasets/image/kolektor.py | 7 +++--- src/anomalib/data/datasets/image/mvtec.py | 11 ++++++---- src/anomalib/data/datasets/image/visa.py | 14 ++++++++---- src/anomalib/data/datasets/video/avenue.py | 8 +++++-- .../data/datasets/video/shanghaitech.py | 2 +- src/anomalib/data/datasets/video/ucsd_ped.py | 2 +- 12 files changed, 92 insertions(+), 29 deletions(-) diff --git a/src/anomalib/data/datamodules/video/shanghaitech.py b/src/anomalib/data/datamodules/video/shanghaitech.py index a50661496e..b474f09547 100644 --- a/src/anomalib/data/datamodules/video/shanghaitech.py +++ b/src/anomalib/data/datamodules/video/shanghaitech.py @@ -20,6 +20,8 @@ from pathlib import Path from shutil import move +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame @@ -51,6 +53,14 @@ class ShanghaiTech(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task TaskType): Task type, 'classification', 'detection' or 'segmentation' + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. 
+ eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to 32. eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. @@ -67,6 +77,10 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -78,6 +92,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -94,6 +112,7 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = ShanghaiTechDataset( task=self.task, + transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -104,6 +123,7 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = ShanghaiTechDataset( task=self.task, + transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datamodules/video/ucsd_ped.py b/src/anomalib/data/datamodules/video/ucsd_ped.py index 410efd7728..2dd480ef37 100644 --- a/src/anomalib/data/datamodules/video/ucsd_ped.py +++ b/src/anomalib/data/datamodules/video/ucsd_ped.py @@ -7,6 +7,8 @@ from pathlib import Path from shutil import move +from torchvision.transforms.v2 import Transform + from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame @@ -32,10 +34,16 @@ class UCSDped(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task (TaskType): Task type, 'classification', 'detection' or 'segmentation' - train_batch_size (int, optional): Training batch size. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. Defaults to ``None``. - eval_batch_size (int, optional): Test batch size. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. Defaults to ``None``. + train_batch_size (int, optional): Training batch size. Defaults to 32. + eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. val_split_mode (ValSplitMode): Setting that determines how the validation subset is obtained. val_split_ratio (float): Fraction of train or test images that will be reserved for validation. 
@@ -50,6 +58,10 @@ def __init__( frames_between_clips: int = 10, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, + image_size: tuple[int, int] | None = None, + transform: Transform | None = None, + train_transform: Transform | None = None, + eval_transform: Transform | None = None, train_batch_size: int = 8, eval_batch_size: int = 8, num_workers: int = 8, @@ -61,6 +73,10 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, + image_size=image_size, + transform=transform, + train_transform=train_transform, + eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -77,6 +93,7 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = UCSDpedDataset( task=self.task, + transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -87,6 +104,7 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = UCSDpedDataset( task=self.task, + transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datasets/depth/folder_3d.py b/src/anomalib/data/datasets/depth/folder_3d.py index e181145cdd..a176674ff0 100644 --- a/src/anomalib/data/datasets/depth/folder_3d.py +++ b/src/anomalib/data/datasets/depth/folder_3d.py @@ -24,6 +24,7 @@ class Folder3DDataset(AnomalibDepthDataset): Args: name (str): Name of the dataset. task (TaskType): Task type. (``classification``, ``detection`` or ``segmentation``). + transform (Transform): Transforms that should be applied to the input images. normal_dir (str | Path): Path to the directory containing normal images. root (str | Path | None): Root folder of the dataset. Defaults to ``None``. @@ -44,11 +45,11 @@ class Folder3DDataset(AnomalibDepthDataset): normal_test_depth_dir (str | Path | None, optional): Path to the directory containing normal depth images for the test dataset. Normal test images will be a split of `normal_dir` if `None`. Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. split (str | Split | None): Fixed subset split that follows from folder structure on file system. Choose from [Split.FULL, Split.TRAIN, Split.TEST] Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. - Defaults to ``None``. extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the directory. Defaults to ``None``. @@ -69,8 +70,8 @@ def __init__( normal_depth_dir: str | Path | None = None, abnormal_depth_dir: str | Path | None = None, normal_test_depth_dir: str | Path | None = None, - split: str | Split | None = None, transform: Transform | None = None, + split: str | Split | None = None, extensions: tuple[str, ...] | None = None, ) -> None: super().__init__(task, transform) diff --git a/src/anomalib/data/datasets/depth/mvtec_3d.py b/src/anomalib/data/datasets/depth/mvtec_3d.py index da2c68f86a..de6d326a4a 100644 --- a/src/anomalib/data/datasets/depth/mvtec_3d.py +++ b/src/anomalib/data/datasets/depth/mvtec_3d.py @@ -43,9 +43,9 @@ class MVTec3DDataset(AnomalibDepthDataset): Defaults to ``"./datasets/MVTec3D"``. category (str): Sub-category of the dataset, e.g. 
'bagel' Defaults to ``"bagel"``. - split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. + split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. """ @@ -54,10 +54,10 @@ def __init__( task: TaskType, root: Path | str = "./datasets/MVTec3D", category: str = "bagel", - split: str | Split | None = None, transform: Transform | None = None, + split: str | Split | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task=task, transform=transform) self.root_category = Path(root) / Path(category) self.split = split diff --git a/src/anomalib/data/datasets/image/btech.py b/src/anomalib/data/datasets/image/btech.py index 7fc9ef05f1..412097c912 100644 --- a/src/anomalib/data/datasets/image/btech.py +++ b/src/anomalib/data/datasets/image/btech.py @@ -28,15 +28,19 @@ class BTechDataset(AnomalibDataset): Args: root: Path to the BTech dataset category: Name of the BTech category. - transform: Transform to apply to the input images. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. split: 'train', 'val' or 'test' task: ``classification``, ``detection`` or ``segmentation`` create_validation_set: Create a validation subset in addition to the train and test subsets Examples: >>> from anomalib.data.image.btech import BTechDataset + >>> from anomalib.data.utils.transforms import get_transforms + >>> transform = get_transforms(image_size=256) >>> dataset = BTechDataset( ... task="classification", + ... transform=transform, ... root='./datasets/BTech', ... category='01', ... ) diff --git a/src/anomalib/data/datasets/image/folder.py b/src/anomalib/data/datasets/image/folder.py index 18e162ee1c..48415c0867 100644 --- a/src/anomalib/data/datasets/image/folder.py +++ b/src/anomalib/data/datasets/image/folder.py @@ -27,6 +27,8 @@ class FolderDataset(AnomalibDataset): Args: name (str): Name of the dataset. This is used to name the datamodule, especially when logging/saving. task (TaskType): Task type. (``classification``, ``detection`` or ``segmentation``). + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. normal_dir (str | Path | Sequence): Path to the directory containing normal images. root (str | Path | None): Root folder of the dataset. Defaults to ``None``. @@ -41,8 +43,6 @@ class FolderDataset(AnomalibDataset): split (str | Split | None): Fixed subset split that follows from folder structure on file system. Choose from [Split.FULL, Split.TRAIN, Split.TEST] Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. - Defaults to ``None``. extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the directory. Defaults to ``None``. @@ -52,7 +52,12 @@ class FolderDataset(AnomalibDataset): Examples: Assume that we would like to use this ``FolderDataset`` to create a dataset from a folder for a classification - task. + task. We could first create the transforms, + + >>> from anomalib.data.utils import InputNormalizationMethod, get_transforms + >>> transform = get_transforms(image_size=256, normalization=InputNormalizationMethod.NONE) + + We could then create the dataset as follows, .. 
code-block:: python @@ -60,6 +65,7 @@ class FolderDataset(AnomalibDataset): normal_dir=dataset_root / "good", abnormal_dir=dataset_root / "crack", split="train", + transform=transform, task=TaskType.CLASSIFICATION, ) @@ -70,12 +76,12 @@ def __init__( name: str, task: TaskType, normal_dir: str | Path | Sequence[str | Path], + transform: Transform | None = None, root: str | Path | None = None, abnormal_dir: str | Path | Sequence[str | Path] | None = None, normal_test_dir: str | Path | Sequence[str | Path] | None = None, mask_dir: str | Path | Sequence[str | Path] | None = None, split: str | Split | None = None, - transform: Transform | None = None, extensions: tuple[str, ...] | None = None, ) -> None: super().__init__(task, transform) diff --git a/src/anomalib/data/datasets/image/kolektor.py b/src/anomalib/data/datasets/image/kolektor.py index e64c65c70a..39e9380a03 100644 --- a/src/anomalib/data/datasets/image/kolektor.py +++ b/src/anomalib/data/datasets/image/kolektor.py @@ -38,19 +38,20 @@ class KolektorDataset(AnomalibDataset): task (TaskType): Task type, ``classification``, ``detection`` or ``segmentation`` root (Path | str): Path to the root of the dataset Defaults to ``./datasets/kolektor``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. """ def __init__( self, task: TaskType, root: Path | str = "./datasets/kolektor", - split: str | Split | None = None, transform: Transform | None = None, + split: str | Split | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task=task, transform=transform) self.root = root self.split = split diff --git a/src/anomalib/data/datasets/image/mvtec.py b/src/anomalib/data/datasets/image/mvtec.py index 64146bb5d4..bb6fdf9e41 100644 --- a/src/anomalib/data/datasets/image/mvtec.py +++ b/src/anomalib/data/datasets/image/mvtec.py @@ -65,18 +65,21 @@ class MVTecDataset(AnomalibDataset): Defaults to ``./datasets/MVTec``. category (str): Sub-category of the dataset, e.g. 'bottle' Defaults to ``bottle``. - split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. + split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. Examples: .. 
code-block:: python from anomalib.data.image.mvtec import MVTecDataset + from anomalib.data.utils.transforms import get_transforms + transform = get_transforms(image_size=256) dataset = MVTecDataset( task="classification", + transform=transform, root='./datasets/MVTec', category='zipper', ) @@ -107,10 +110,10 @@ def __init__( task: TaskType, root: Path | str = "./datasets/MVTec", category: str = "bottle", - split: str | Split | None = None, transform: Transform | None = None, + split: str | Split | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task=task, transform=transform) self.root_category = Path(root) / Path(category) self.category = category diff --git a/src/anomalib/data/datasets/image/visa.py b/src/anomalib/data/datasets/image/visa.py index 70051b9406..9c5336ab05 100644 --- a/src/anomalib/data/datasets/image/visa.py +++ b/src/anomalib/data/datasets/image/visa.py @@ -52,9 +52,9 @@ class VisaDataset(AnomalibDataset): task (TaskType): Task type, ``classification``, ``detection`` or ``segmentation`` root (str | Path): Path to the root of the dataset category (str): Sub-category of the dataset, e.g. 'candle' - split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. + split (str | Split | None): Split of the dataset, usually Split.TRAIN or Split.TEST Defaults to ``None``. Examples: @@ -63,9 +63,12 @@ class VisaDataset(AnomalibDataset): .. code-block:: python from anomalib.data.image.visa import VisaDataset + from anomalib.data.utils.transforms import get_transforms + transform = get_transforms(image_size=256) dataset = VisaDataset( task="classification", + transform=transform, split="train", root="./datasets/visa/visa_pytorch/", category="candle", @@ -83,9 +86,12 @@ class VisaDataset(AnomalibDataset): .. code-block:: python from anomalib.data.image.visa import VisaDataset + from anomalib.data.utils.transforms import get_transforms + transform = get_transforms(image_size=256) dataset = VisaDataset( task="segmentation", + transform=transform, split="train", root="./datasets/visa/visa_pytorch/", category="candle", @@ -103,10 +109,10 @@ def __init__( task: TaskType, root: str | Path, category: str, - split: str | Split | None = None, transform: Transform | None = None, + split: str | Split | None = None, ) -> None: - super().__init__(task, transform) + super().__init__(task=task, transform=transform) self.root_category = Path(root) / category self.split = split diff --git a/src/anomalib/data/datasets/video/avenue.py b/src/anomalib/data/datasets/video/avenue.py index 84caebb044..0d3bd741bf 100644 --- a/src/anomalib/data/datasets/video/avenue.py +++ b/src/anomalib/data/datasets/video/avenue.py @@ -47,7 +47,7 @@ class AvenueDataset(AnomalibVideoDataset): Defaults to ``1``. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. Defaults to ``VideoTargetFrame.LAST``. - transform (Transform | None, optional): Transforms that should be applied to the input images. + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. Examples: @@ -55,8 +55,10 @@ class AvenueDataset(AnomalibVideoDataset): .. 
code-block:: python + transform = A.Compose([A.Resize(256, 256), A.pytorch.ToTensorV2()]) dataset = AvenueDataset( task="classification", + transform=transform, split="train", root="./datasets/avenue/", ) @@ -72,6 +74,7 @@ class AvenueDataset(AnomalibVideoDataset): dataset = AvenueDataset( task="segmentation", + transform=transform, split="test", root="./datasets/avenue/", ) @@ -89,6 +92,7 @@ class AvenueDataset(AnomalibVideoDataset): dataset = AvenueDataset( task="classification", + transform=transform, split="test", root="./datasets/avenue/", clip_length_in_frames=1, @@ -110,8 +114,8 @@ def __init__( gt_dir: Path | str = "./datasets/avenue/ground_truth_demo", clip_length_in_frames: int = 2, frames_between_clips: int = 1, - target_frame: VideoTargetFrame = VideoTargetFrame.LAST, transform: Transform | None = None, + target_frame: VideoTargetFrame = VideoTargetFrame.LAST, ) -> None: super().__init__( task=task, diff --git a/src/anomalib/data/datasets/video/shanghaitech.py b/src/anomalib/data/datasets/video/shanghaitech.py index c49bb32332..e90dbae482 100644 --- a/src/anomalib/data/datasets/video/shanghaitech.py +++ b/src/anomalib/data/datasets/video/shanghaitech.py @@ -42,7 +42,7 @@ class ShanghaiTechDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. - transform (Transform | None, optional): Transforms that should be applied to the input images. + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. """ diff --git a/src/anomalib/data/datasets/video/ucsd_ped.py b/src/anomalib/data/datasets/video/ucsd_ped.py index 00fa3ba8ca..960218e79e 100644 --- a/src/anomalib/data/datasets/video/ucsd_ped.py +++ b/src/anomalib/data/datasets/video/ucsd_ped.py @@ -33,7 +33,7 @@ class UCSDpedDataset(AnomalibVideoDataset): clip_length_in_frames (int, optional): Number of video frames in each clip. frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval. - transform (Transform | None, optional): Transforms that should be applied to the input images. + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. 
""" From 2e9a544d49141af78c6a61b44adba5c3c7e3e54b Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 12:43:51 +0100 Subject: [PATCH 25/59] Revert notebooks Signed-off-by: Samet Akcay --- notebooks/100_datamodules/101_btech.ipynb | 1 + notebooks/100_datamodules/102_mvtec.ipynb | 1 + notebooks/100_datamodules/103_folder.ipynb | 1 + notebooks/200_models/201_fastflow.ipynb | 3 ++- notebooks/600_loggers/601_mlflow_logging.ipynb | 1 + notebooks/700_metrics/701a_aupimo.ipynb | 1 + notebooks/700_metrics/701b_aupimo_advanced_i.ipynb | 1 + notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb | 1 + 8 files changed, 9 insertions(+), 1 deletion(-) diff --git a/notebooks/100_datamodules/101_btech.ipynb b/notebooks/100_datamodules/101_btech.ipynb index 4e00ae5fb6..ef188665e6 100644 --- a/notebooks/100_datamodules/101_btech.ipynb +++ b/notebooks/100_datamodules/101_btech.ipynb @@ -106,6 +106,7 @@ "btech_datamodule = BTech(\n", " root=dataset_root,\n", " category=\"01\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", diff --git a/notebooks/100_datamodules/102_mvtec.ipynb b/notebooks/100_datamodules/102_mvtec.ipynb index 432c530482..4c274939d6 100644 --- a/notebooks/100_datamodules/102_mvtec.ipynb +++ b/notebooks/100_datamodules/102_mvtec.ipynb @@ -84,6 +84,7 @@ "mvtec_datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", diff --git a/notebooks/100_datamodules/103_folder.ipynb b/notebooks/100_datamodules/103_folder.ipynb index dbe94b5cdc..2f642e145a 100644 --- a/notebooks/100_datamodules/103_folder.ipynb +++ b/notebooks/100_datamodules/103_folder.ipynb @@ -102,6 +102,7 @@ " abnormal_dir=\"crack\",\n", " task=TaskType.SEGMENTATION,\n", " mask_dir=dataset_root / \"mask\" / \"crack\",\n", + " image_size=(256, 256),\n", ")\n", "folder_datamodule.setup()" ] diff --git a/notebooks/200_models/201_fastflow.ipynb b/notebooks/200_models/201_fastflow.ipynb index 57d821489e..4cf8853fb3 100644 --- a/notebooks/200_models/201_fastflow.ipynb +++ b/notebooks/200_models/201_fastflow.ipynb @@ -120,6 +120,7 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", @@ -554,7 +555,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.10.14" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/600_loggers/601_mlflow_logging.ipynb b/notebooks/600_loggers/601_mlflow_logging.ipynb index f487856e3b..f45a7a0e74 100644 --- a/notebooks/600_loggers/601_mlflow_logging.ipynb +++ b/notebooks/600_loggers/601_mlflow_logging.ipynb @@ -197,6 +197,7 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=24,\n", diff --git a/notebooks/700_metrics/701a_aupimo.ipynb b/notebooks/700_metrics/701a_aupimo.ipynb index 4ba4d38adc..5c5497b3b8 100644 --- a/notebooks/700_metrics/701a_aupimo.ipynb +++ b/notebooks/700_metrics/701a_aupimo.ipynb @@ -140,6 +140,7 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", diff --git a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb index 
80643fee99..a785075060 100644 --- a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb +++ b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb @@ -164,6 +164,7 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", diff --git a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb index 0798f94e55..ed647ef666 100644 --- a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb +++ b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb @@ -158,6 +158,7 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", + " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", From cd524b570c24407689d6aa8fafa9c82def90f388 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 14:30:55 +0100 Subject: [PATCH 26/59] remove padim preprocessor Signed-off-by: Samet Akcay --- .../models/image/padim/lightning_model.py | 12 -------- src/anomalib/pre_processing/pre_processing.py | 29 +++++++++++-------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/anomalib/models/image/padim/lightning_model.py b/src/anomalib/models/image/padim/lightning_model.py index 0b0d709e20..c81438c542 100644 --- a/src/anomalib/models/image/padim/lightning_model.py +++ b/src/anomalib/models/image/padim/lightning_model.py @@ -10,7 +10,6 @@ import torch from lightning.pytorch.utilities.types import STEP_OUTPUT -from torchvision.transforms.v2 import Compose, Normalize, Resize from anomalib import LearningType from anomalib.data import Batch @@ -129,17 +128,6 @@ def learning_type(self) -> LearningType: """ return LearningType.ONE_CLASS - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: - """Default pre-processor for Padim.""" - image_size = image_size or (256, 256) - return PreProcessor( - transform=Compose([ - Resize(image_size, antialias=True), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ]), - ) - @staticmethod def default_post_processor() -> OneClassPostProcessor: """Return the default post-processor for PADIM.""" diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 5b8c8f46a1..a950071968 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -106,18 +106,23 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non stage: The stage (e.g., 'fit', 'train', 'val', 'test', 'predict'). 
""" super().setup(trainer, pl_module, stage) - stage = TrainerFn(stage).value # This is to convert the stage to a string - stages = ["train", "val"] if stage == "fit" else [stage] - for current_stage in stages: - transform = getattr(self, f"{current_stage}_transform") - if transform: - if hasattr(trainer, "datamodule"): - set_datamodule_transform(trainer.datamodule, transform, current_stage) - elif hasattr(trainer, f"{current_stage}_dataloaders"): - set_dataloader_transform(getattr(trainer, f"{current_stage}_dataloaders"), transform) - else: - msg = f"Trainer does not have a datamodule or {current_stage}_dataloaders attribute" - raise ValueError(msg) + stage = TrainerFn(stage).value # Make sure ``stage`` is a str + stage_transforms = { + "fit": self.train_transform, + "validate": self.val_transform, + "test": self.test_transform, + "predict": self.predict_transform, + } + transform = stage_transforms.get(stage) + + if transform: + if hasattr(trainer, "datamodule"): + set_datamodule_transform(trainer.datamodule, transform, stage) + elif hasattr(trainer, f"{stage}_dataloaders"): + set_dataloader_transform(getattr(trainer, f"{stage}_dataloaders"), transform) + else: + msg = f"Trainer does not have a datamodule or {stage}_dataloaders attribute" + raise ValueError(msg) def forward(self, batch: torch.Tensor) -> torch.Tensor: """Apply transforms to the batch of tensors for inference. From 8cd8f7ff89983beebbd387ea440ad1777783517d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 16:49:57 +0100 Subject: [PATCH 27/59] Update the setup logic in pre-processor Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 105 +++++++++--- src/anomalib/pre_processing/utils/__init__.py | 4 + .../pre_processing/utils/transform.py | 161 ++++++++++++++++++ 3 files changed, 245 insertions(+), 25 deletions(-) create mode 100644 src/anomalib/pre_processing/utils/__init__.py create mode 100644 src/anomalib/pre_processing/utils/transform.py diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index a950071968..3f152e0cc9 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -3,16 +3,30 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING + import torch from lightning import Callback, LightningModule, Trainer from lightning.pytorch.trainer.states import TrainerFn from torch import nn +from torch.utils.data import DataLoader from torchvision.transforms.v2 import Transform from anomalib.data.dataclasses.torch.base import Batch -from anomalib.data.utils.transform import set_dataloader_transform, set_datamodule_transform from anomalib.deploy.utils import get_exportable_transform +from .utils.transform import ( + get_dataloaders_transforms, + get_datamodule_transforms, + set_dataloaders_transforms, + set_datamodule_transforms, +) + +if TYPE_CHECKING: + from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS + + from anomalib.data import AnomalibDataModule + class PreProcessor(nn.Module, Callback): """Anomalib pre-processor. @@ -35,6 +49,12 @@ class PreProcessor(nn.Module, Callback): Notes: If only `transform` is provided, it will be used for all stages (train, val, test). + Priority of transforms: + 1. Explicitly set PreProcessor transforms (highest priority) + 2. Datamodule transforms (if PreProcessor has no transforms) + 3. 
Dataloader transforms (if neither PreProcessor nor datamodule have transforms) + 4. Default transforms (lowest priority) + Examples: >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor >>> from anomalib.pre_processing import PreProcessor @@ -90,39 +110,74 @@ def __init__( self.val_transform = val_transform or transform self.test_transform = test_transform or transform self.predict_transform = self.test_transform - self.exportable_transform = get_exportable_transform(self.test_transform) - def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: - """Set the transforms for datamodule or dataloaders. + def setup_transforms( + self, + datamodule: "AnomalibDataModule | None" = None, + dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS | None" = None, + ) -> None: + """Set up and propagate transforms according to priority rules. - The model-specific transforms are configured within PreProcessor and stored in - model implementation. This method sets the transforms for the datamodule or - dataloaders. + Args: + datamodule: DataModule that might contain transforms. + dataloaders: Dataloaders that might contain transforms. + """ + if isinstance(dataloaders, DataLoader): + dataloaders = [dataloaders] + + # If PreProcessor has transforms, propagate them to datamodule or dataloaders + if any([self.train_transform, self.val_transform, self.test_transform]): + transforms = { + "train": self.train_transform, + "val": self.val_transform, + "test": self.test_transform, + } + + if datamodule: + set_datamodule_transforms(datamodule, transforms) + if dataloaders: + set_dataloaders_transforms(dataloaders, transforms) + return + + # Try to get transforms from datamodule + if datamodule: + datamodule_transforms = get_datamodule_transforms(datamodule) + if datamodule_transforms: + self.train_transform = datamodule_transforms.get("train") + self.val_transform = datamodule_transforms.get("val") + self.test_transform = datamodule_transforms.get("test") + self.predict_transform = self.test_transform + self.exportable_transform = get_exportable_transform(self.test_transform) + return + + # Try to get transforms from dataloaders + if dataloaders: + dataloaders_transforms = get_dataloaders_transforms(dataloaders) + if dataloaders_transforms: + self.train_transform = dataloaders_transforms.get("train") + self.val_transform = dataloaders_transforms.get("val") + self.test_transform = dataloaders_transforms.get("test") + self.predict_transform = self.test_transform + self.exportable_transform = get_exportable_transform(self.test_transform) + + def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: + """Configure transforms at the start of each stage. Args: trainer: The Lightning trainer. pl_module: The Lightning module. - stage: The stage (e.g., 'fit', 'train', 'val', 'test', 'predict'). + stage: The stage (e.g., 'fit', 'validate', 'test', 'predict'). 
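As a usage sketch of the priority rules above (illustrative only; ``PreProcessor``, ``setup_transforms`` and the ``MVTec`` datamodule are names introduced or used elsewhere in this change):

.. code-block:: python

    from torchvision.transforms.v2 import Compose, Normalize, Resize

    from anomalib.data import MVTec
    from anomalib.pre_processing import PreProcessor

    datamodule = MVTec(category="bottle", image_size=(256, 256))

    # 1. Explicit PreProcessor transforms take priority and are propagated to the
    #    datamodule's datasets (where those datasets exist).
    pre_processor = PreProcessor(
        transform=Compose([
            Resize((256, 256), antialias=True),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]),
    )
    pre_processor.setup_transforms(datamodule=datamodule)

    # 2. An empty PreProcessor instead adopts the datamodule's train/eval transforms
    #    (here a Resize derived from ``image_size``).
    pre_processor = PreProcessor()
    pre_processor.setup_transforms(datamodule=datamodule)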
""" + stage = TrainerFn(stage).value # Convert string to TrainerFn enum + + if hasattr(trainer, "datamodule"): + self.setup_transforms(datamodule=trainer.datamodule) + elif hasattr(trainer, f"{stage}_dataloaders"): + dataloaders = getattr(trainer, f"{stage}_dataloaders") + self.setup_transforms(dataloaders=dataloaders) + super().setup(trainer, pl_module, stage) - stage = TrainerFn(stage).value # Make sure ``stage`` is a str - stage_transforms = { - "fit": self.train_transform, - "validate": self.val_transform, - "test": self.test_transform, - "predict": self.predict_transform, - } - transform = stage_transforms.get(stage) - - if transform: - if hasattr(trainer, "datamodule"): - set_datamodule_transform(trainer.datamodule, transform, stage) - elif hasattr(trainer, f"{stage}_dataloaders"): - set_dataloader_transform(getattr(trainer, f"{stage}_dataloaders"), transform) - else: - msg = f"Trainer does not have a datamodule or {stage}_dataloaders attribute" - raise ValueError(msg) def forward(self, batch: torch.Tensor) -> torch.Tensor: """Apply transforms to the batch of tensors for inference. diff --git a/src/anomalib/pre_processing/utils/__init__.py b/src/anomalib/pre_processing/utils/__init__.py new file mode 100644 index 0000000000..8361223189 --- /dev/null +++ b/src/anomalib/pre_processing/utils/__init__.py @@ -0,0 +1,4 @@ +"""Utility functions for pre-processing.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py new file mode 100644 index 0000000000..61d2317fc5 --- /dev/null +++ b/src/anomalib/pre_processing/utils/transform.py @@ -0,0 +1,161 @@ +"""Utility functions for transforms.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Sequence + +from torch.utils.data import DataLoader +from torchvision.transforms.v2 import Transform + +from anomalib.data import AnomalibDataModule + + +def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: + """Set a transform for a specific stage in a AnomalibDataModule. + + Args: + datamodule: The AnomalibDataModule to set the transform for. + transform: The transform to set. + stage: The stage to set the transform for. + + Note: + The stage parameter maps to dataset attributes as follows: + - 'fit' -> 'train_data' + - 'validate' -> 'val_data' + - 'test' -> 'test_data' + - 'predict' -> 'test_data' + """ + stage_datasets = { + "fit": "train_data", + "validate": "val_data", + "test": "test_data", + "predict": "test_data", + } + + dataset_attr = stage_datasets.get(stage) + if dataset_attr and hasattr(datamodule, dataset_attr): + dataset = getattr(datamodule, dataset_attr) + if hasattr(dataset, "transform"): + dataset.transform = transform + + +def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: + """Set a transform for a dataloader or list of dataloaders. + + Args: + dataloader: The dataloader(s) to set the transform for. + transform: The transform to set. 
+ """ + if isinstance(dataloader, DataLoader): + if hasattr(dataloader.dataset, "transform"): + dataloader.dataset.transform = transform + elif isinstance(dataloader, Sequence): + for dl in dataloader: + set_dataloader_transform(dl, transform) + else: + msg = f"Unsupported dataloader type: {type(dataloader)}" + raise TypeError(msg) + + +def get_stage_transform(stage: str, transforms: dict[str, Transform | None]) -> Transform | None: + """Get the transform for a specific stage. + + Args: + stage: The stage to get the transform for (fit, validate, test, predict). + transforms: Dictionary mapping stage names to transforms. + + Returns: + Transform for the specified stage, or None if not found. + """ + stage_transforms_mapping = { + "fit": transforms.get("train"), + "validate": transforms.get("val"), + "test": transforms.get("test"), + "predict": transforms.get("test"), # predict uses test transform + } + return stage_transforms_mapping.get(stage) + + +def get_datamodule_transforms(datamodule: AnomalibDataModule) -> dict[str, Transform] | None: + """Get transforms from datamodule if available. + + Args: + datamodule: The datamodule to get transforms from. + + Returns: + Dictionary of transforms if found in datamodule, None otherwise. + """ + if hasattr(datamodule, "train_transform") and hasattr(datamodule, "eval_transform"): + return { + "train": datamodule.train_transform, + "val": datamodule.eval_transform, + "test": datamodule.eval_transform, + } + return None + + +def get_dataloaders_transforms(dataloaders: Sequence[DataLoader]) -> dict[str, Transform]: + """Get transforms from dataloaders. + + Args: + dataloaders: The dataloaders to get transforms from. + + Returns: + Dictionary mapping stages to their transforms. + """ + transforms: dict[str, Transform] = {} + stage_lookup = { + "fit": "train", + "validate": "val", + "test": "test", + "predict": "test", + } + + for dataloader in dataloaders: + if not hasattr(dataloader, "dataset") or not hasattr(dataloader.dataset, "transform"): + continue + + for stage in stage_lookup: + if hasattr(dataloader, f"{stage}_dataloader"): + transforms[stage_lookup[stage]] = dataloader.dataset.transform + + return transforms + + +def set_datamodule_transforms(datamodule: AnomalibDataModule, transforms: dict[str, Transform | None]) -> None: + """Set transforms to a datamodule. + + Args: + datamodule: The datamodule to propagate transforms to. + transforms: Dictionary mapping stages to their transforms. + """ + for stage in ["fit", "validate", "test", "predict"]: + transform = get_stage_transform(stage, transforms) + if transform is not None: + set_datamodule_transform(datamodule, transform, stage) + + +def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: dict[str, Transform | None]) -> None: + """Set transforms to dataloaders. + + Args: + dataloaders: The dataloaders to propagate transforms to. + transforms: Dictionary mapping stages to their transforms. 
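To make the stage handling concrete, a small sketch of how ``get_stage_transform`` maps Trainer stage names onto the transform dictionary (the transform values are placeholders):

.. code-block:: python

    from torchvision.transforms.v2 import Resize

    from anomalib.pre_processing.utils.transform import get_stage_transform

    transforms = {
        "train": Resize((256, 256), antialias=True),
        "val": Resize((256, 256), antialias=True),
        "test": Resize((224, 224), antialias=True),
    }

    get_stage_transform("fit", transforms)       # -> the "train" transform
    get_stage_transform("validate", transforms)  # -> the "val" transform
    get_stage_transform("predict", transforms)   # -> the "test" transform (predict reuses it)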
+ """ + stage_mapping = { + "fit": "train", + "validate": "val", + "test": "test", + "predict": "test", # predict uses test transform + } + + for loader in dataloaders: + if not hasattr(loader, "dataset"): + continue + + for stage in stage_mapping: + if hasattr(loader, f"{stage}_dataloader"): + transform = transforms.get(stage_mapping[stage]) + if transform is not None: + set_dataloader_transform([loader], transform) From d07f0b96587f15ee971ae963e849f459e165c739 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 23 Oct 2024 16:52:47 +0100 Subject: [PATCH 28/59] Revert datamodules Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 16 +- src/anomalib/data/datamodules/image/btech.py | 7 +- src/anomalib/data/datamodules/image/folder.py | 158 ++++++++++-------- 3 files changed, 96 insertions(+), 85 deletions(-) diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index feb1d00c3b..28fd9499eb 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -32,23 +32,21 @@ class AnomalibDataModule(LightningDataModule, ABC): train_batch_size (int): Batch size used by the train dataloader. eval_batch_size (int): Batch size used by the val and test dataloaders. num_workers (int): Number of workers used by the train, val and test dataloaders. - val_split_mode (ValSplitMode | str): Determines how the validation split is obtained. + val_split_mode (ValSplitMode): Determines how the validation split is obtained. Options: [none, same_as_test, from_test, synthetic] val_split_ratio (float): Fraction of the train or test images held our for validation. - test_split_mode (TestSplitMode | str | None, optional): Determines how the test split is obtained. + test_split_mode (Optional[TestSplitMode], optional): Determines how the test split is obtained. Options: [none, from_dir, synthetic]. Defaults to ``None``. - test_split_ratio (float | None, optional): Fraction of the train images held out for testing. + test_split_ratio (float): Fraction of the train images held out for testing. Defaults to ``None``. - image_size (tuple[int, int] | None, optional): Size to which input images should be resized. + image_size (tuple[int, int], optional): Size to which input images should be resized. Defaults to ``None``. - transform (Transform | None, optional): Transforms that should be applied to the input images. + transform (Transform, optional): Transforms that should be applied to the input images. Defaults to ``None``. - train_transform (Transform | None, optional): Transforms that should be applied to the input images - during training. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. Defaults to ``None``. - eval_transform (Transform | None, optional): Transforms that should be applied to the input images - during evaluation. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. Defaults to ``None``. seed (int | None, optional): Seed used during random subset splitting. Defaults to ``None``. diff --git a/src/anomalib/data/datamodules/image/btech.py b/src/anomalib/data/datamodules/image/btech.py index bd3d762cf2..5abda6156e 100644 --- a/src/anomalib/data/datamodules/image/btech.py +++ b/src/anomalib/data/datamodules/image/btech.py @@ -79,13 +79,12 @@ class BTech(AnomalibDataModule): >>> datamodule = BTech( ... root="./datasets/BTech", ... category="01", - ... 
image_size=(256, 256), + ... image_size=256, ... train_batch_size=32, ... eval_batch_size=32, ... num_workers=8, - ... transform=None, - ... train_transform=None, - ... eval_transform=None, + ... transform_config_train=None, + ... transform_config_eval=None, ... ) >>> datamodule.setup() diff --git a/src/anomalib/data/datamodules/image/folder.py b/src/anomalib/data/datamodules/image/folder.py index e40f2322f5..7941ba2f7b 100644 --- a/src/anomalib/data/datamodules/image/folder.py +++ b/src/anomalib/data/datamodules/image/folder.py @@ -22,90 +22,104 @@ class Folder(AnomalibDataModule): Args: name (str): Name of the dataset. This is used to name the datamodule, especially when logging/saving. - normal_dir (str | Path | Sequence[str | Path]): Path to the directory containing normal images. - root (str | Path | None): Path to the root folder containing normal and abnormal dirs. Defaults to ``None``. - abnormal_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing abnormal images. + normal_dir (str | Path | Sequence): Name of the directory containing normal images. + root (str | Path | None): Path to the root folder containing normal and abnormal dirs. Defaults to ``None``. - normal_test_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing - normal images for the test dataset. Defaults to ``None``. - mask_dir (str | Path | Sequence[str | Path] | None): Path to the directory containing - the mask annotations. Defaults to ``None``. - normal_split_ratio (float): Ratio to split normal training images and add to the - test set in case test set doesn't contain any normal images. Defaults to 0.2. - extensions (tuple[str, ...] | None): Type of the image extensions to read from the - directory. Defaults to ``None``. - train_batch_size (int): Training batch size. Defaults to 32. - eval_batch_size (int): Validation, test and predict batch size. Defaults to 32. - num_workers (int): Number of workers. Defaults to 8. - task (TaskType | str): Task type. Could be ``classification``, ``detection`` or ``segmentation``. - Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int] | None): Size to which input images should be resized. Defaults to ``None``. - transform (Transform | None): Transforms that should be applied to the input images. Defaults to ``None``. - train_transform (Transform | None): Transforms that should be applied to the input images during training. + abnormal_dir (str | Path | None | Sequence): Name of the directory containing abnormal images. Defaults to ``None``. - eval_transform (Transform | None): Transforms that should be applied to the input images during evaluation. + normal_test_dir (str | Path | Sequence | None, optional): Path to the directory containing + normal images for the test dataset. Defaults to ``None``. - test_split_mode (TestSplitMode | str): Setting that determines how the testing subset is obtained. + mask_dir (str | Path | Sequence | None, optional): Path to the directory containing + the mask annotations. + Defaults to ``None``. + normal_split_ratio (float, optional): Ratio to split normal training images and add to the + test set in case test set doesn't contain any normal images. + Defaults to 0.2. + extensions (tuple[str, ...] | None, optional): Type of the image extensions to read from the + directory. + Defaults to ``None``. + train_batch_size (int, optional): Training batch size. + Defaults to ``32``. + eval_batch_size (int, optional): Validation, test and predict batch size. + Defaults to ``32``. 
+ num_workers (int, optional): Number of workers. + Defaults to ``8``. + task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. + Defaults to ``segmentation``. + image_size (tuple[int, int], optional): Size to which input images should be resized. + Defaults to ``None``. + transform (Transform, optional): Transforms that should be applied to the input images. + Defaults to ``None``. + train_transform (Transform, optional): Transforms that should be applied to the input images during training. + Defaults to ``None``. + eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. + Defaults to ``None``. + test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. - Defaults to 0.2. - val_split_mode (ValSplitMode | str): Setting that determines how the validation subset is obtained. + Defaults to ``0.2``. + val_split_mode (ValSplitMode): Setting that determines how the validation subset is obtained. Defaults to ``ValSplitMode.FROM_TEST``. val_split_ratio (float): Fraction of train or test images that will be reserved for validation. - Defaults to 0.5. - seed (int | None): Seed used during random subset splitting. Defaults to ``None``. + Defaults to ``0.5``. + seed (int | None, optional): Seed used during random subset splitting. + Defaults to ``None``. Examples: - The following code demonstrates how to use the ``Folder`` datamodule: - - >>> from pathlib import Path - >>> from anomalib.data import Folder - >>> from anomalib import TaskType - - >>> dataset_root = Path("./sample_dataset") - >>> folder_datamodule = Folder( - ... name="my_folder_dataset", - ... root=dataset_root, - ... normal_dir="good", - ... abnormal_dir="crack", - ... task=TaskType.SEGMENTATION, - ... mask_dir=dataset_root / "mask" / "crack", - ... image_size=(256, 256), - ... train_batch_size=32, - ... eval_batch_size=32, - ... num_workers=8, - ... ) - >>> folder_datamodule.setup() - - >>> # Access the training images - >>> train_dataloader = folder_datamodule.train_dataloader() - >>> batch = next(iter(train_dataloader)) - >>> print(batch.keys(), batch["image"].shape) - - >>> # Access the test images - >>> test_dataloader = folder_datamodule.test_dataloader() - >>> batch = next(iter(test_dataloader)) - >>> print(batch.keys(), batch["image"].shape) - - Note: - The dataset is expected to have a structure similar to: + The following code demonstrates how to use the ``Folder`` datamodule. Assume that the dataset is structured + as follows: .. code-block:: bash - sample_dataset/ - ├── good/ - │ ├── normal_image1.jpg - │ ├── normal_image2.jpg - │ └── ... - ├── crack/ - │ ├── anomaly_image1.jpg - │ ├── anomaly_image2.jpg - │ └── ... - └── mask/ - └── crack/ - ├── anomaly_mask1.png - ├── anomaly_mask2.png - └── ... + $ tree sample_dataset + sample_dataset + ├── colour + │ ├── 00.jpg + │ ├── ... + │ └── x.jpg + ├── crack + │ ├── 00.jpg + │ ├── ... + │ └── y.jpg + ├── good + │ ├── ... + │ └── z.jpg + ├── LICENSE + └── mask + ├── colour + │ ├── ... + │ └── x.jpg + └── crack + ├── ... + └── y.jpg + + .. 
code-block:: python + + folder_datamodule = Folder( + root=dataset_root, + normal_dir="good", + abnormal_dir="crack", + task=TaskType.SEGMENTATION, + mask_dir=dataset_root / "mask" / "crack", + image_size=256, + normalization=InputNormalizationMethod.NONE, + ) + folder_datamodule.setup() + + To access the training images, + + .. code-block:: python + + >> i, data = next(enumerate(folder_datamodule.train_dataloader())) + >> print(data.keys(), data["image"].shape) + + To access the test images, + + .. code-block:: python + + >> i, data = next(enumerate(folder_datamodule.test_dataloader())) + >> print(data.keys(), data["image"].shape) """ def __init__( From 760d5e544a80421562c77f8f060bb5d85ea8f4c9 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 24 Oct 2024 07:10:09 +0100 Subject: [PATCH 29/59] Set datamodule transforms property from preprocessor Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 18 ++- src/anomalib/data/predict.py | 5 +- src/anomalib/data/utils/transform.py | 99 ------------- src/anomalib/deploy/utils.py | 46 ------ src/anomalib/pre_processing/pre_processing.py | 2 +- .../pre_processing/utils/transform.py | 135 +++++++++++------- 6 files changed, 106 insertions(+), 199 deletions(-) delete mode 100644 src/anomalib/data/utils/transform.py delete mode 100644 src/anomalib/deploy/utils.py diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index 28fd9499eb..8869b38ce3 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -248,8 +248,13 @@ def train_transform(self) -> Transform: """ if self._train_transform: return self._train_transform - if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: - return self.trainer.lightning_module.transform + if ( + getattr(self, "trainer", None) + and self.trainer.lightning_module + and hasattr(self.trainer.lightning_module, "pre_processor") + and hasattr(self.trainer.lightning_module.pre_processor, "train_transform") + ): + return self.trainer.lightning_module.pre_processor.train_transform if self.image_size: return Resize(self.image_size, antialias=True) return None @@ -262,8 +267,13 @@ def eval_transform(self) -> Transform: """ if self._eval_transform: return self._eval_transform - if getattr(self, "trainer", None) and self.trainer.lightning_module and self.trainer.lightning_module.transform: - return self.trainer.lightning_module.transform + if ( + getattr(self, "trainer", None) + and self.trainer.lightning_module + and hasattr(self.trainer.lightning_module, "pre_processor") + and hasattr(self.trainer.lightning_module.pre_processor, "test_transform") + ): + return self.trainer.lightning_module.pre_processor.test_transform if self.image_size: return Resize(self.image_size, antialias=True) return None diff --git a/src/anomalib/data/predict.py b/src/anomalib/data/predict.py index 645c5576b4..06c743b88f 100644 --- a/src/anomalib/data/predict.py +++ b/src/anomalib/data/predict.py @@ -47,7 +47,10 @@ def __getitem__(self, index: int) -> ImageItem: if self.transform: image = self.transform(image) - return ImageItem(image=image, image_path=str(image_filename)) + return ImageItem( + image=image, + image_path=str(image_filename), + ) @property def collate_fn(self) -> Callable: diff --git a/src/anomalib/data/utils/transform.py b/src/anomalib/data/utils/transform.py deleted file mode 100644 index 8593e3ff75..0000000000 --- a/src/anomalib/data/utils/transform.py 
+++ /dev/null @@ -1,99 +0,0 @@ -"""Utility functions for data transforms.""" - -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections.abc import Sequence - -from torch.utils.data import DataLoader -from torchvision.transforms.v2 import Transform - -from anomalib.data import AnomalibDataModule - - -def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: - """Set a transform for a specific stage in a AnomalibDataModule. - - This function allows you to set a custom transform for a specific stage (train, val, or test) - in an AnomalibDataModule. It checks if the datamodule has the corresponding dataset attribute - and if that dataset has a transform attribute, then sets the new transform. - - Args: - datamodule: The AnomalibDataModule to set the transform for. - transform: The transform to set. - stage: The stage (e.g., 'train', 'val', 'test') to set the transform for. - - Examples: - >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor - >>> from anomalib.data import MVTec - >>> from anomalib.data.utils.transform import set_datamodule_transform - - >>> # Create a datamodule and check its transform - >>> datamodule = MVTec(root="path/to/dataset", category="bottle") - >>> datamodule.setup() - >>> print(datamodule.train_data.transform) # Output: None or default transform - - >>> # Define a custom transform and set it for the training stage - >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) - >>> set_datamodule_transform(datamodule, custom_transform, "train") - >>> print(datamodule.train_data.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) - - >>> # You can also set transforms for validation and test stages - >>> set_datamodule_transform(datamodule, custom_transform, "val") - >>> set_datamodule_transform(datamodule, custom_transform, "test") - - >>> # The dataloaders will now use the custom transforms - >>> train_dataloader = datamodule.train_dataloader() - >>> val_dataloader = datamodule.val_dataloader() - >>> test_dataloader = datamodule.test_dataloader() - """ - dataset_attr = f"{stage}_data" - if hasattr(datamodule, dataset_attr): - dataset = getattr(datamodule, dataset_attr) - if hasattr(dataset, "transform"): - dataset.transform = transform - - -def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: - """Set a transform for a dataloader or list of dataloaders. - - Args: - dataloader: The dataloader(s) to set the transform for. Can be a single DataLoader, - a callable returning a DataLoader, or a list of DataLoaders. - transform: The transform to set. 
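Stepping back to the ``train_transform``/``eval_transform`` properties updated in ``datamodules/base/image.py`` above, the fallback chain can be sketched as follows (illustrative; no trainer is attached, so the properties never reach the ``pre_processor`` branch and fall through to ``image_size``):

.. code-block:: python

    from anomalib.data import MVTec

    # No explicit transforms are passed: with a trainer attached, the properties would
    # return the model's ``pre_processor`` transforms; without one, they fall back to a
    # plain Resize built from ``image_size``.
    datamodule = MVTec(category="bottle", image_size=(256, 256))
    print(datamodule.train_transform)  # a Resize transform derived from ``image_size``
    print(datamodule.eval_transform)   # same fallback for evaluation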
- - Examples: - >>> from torch.utils.data import DataLoader - >>> from torchvision.transforms.v2 import Compose, Resize, ToTensor - >>> from anomalib.data import MVTecDataset - >>> from anomalib.data.utils.transform import set_dataloader_transform - - >>> # Create a dataset and dataloader - >>> dataset = MVTecDataset(root="./datasets/MVTec", category="bottle", task="segmentation") - >>> dataloader = DataLoader(dataset, batch_size=32) - - >>> # Define a custom transform and set it for a single DataLoader - >>> custom_transform = Compose([Resize((224, 224)), ToTensor()]) - >>> set_dataloader_transform(dataloader, custom_transform) - >>> print(dataloader.dataset.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) - - >>> # Set the transform for a list of DataLoaders - >>> dataset_bottle = MVTecDataset(root="./datasets/MVTec", category="bottle", task="segmentation") - >>> dataset_cable = MVTecDataset(root="./datasets/MVTec", category="cable", task="segmentation") - >>> dataloader_list = [ - ... DataLoader(dataset_bottle, batch_size=32), - ... DataLoader(dataset_cable, batch_size=32) - ... ] - >>> set_dataloader_transform(dataloader_list, custom_transform) - >>> for dl in dataloader_list: - ... print(dl.dataset.transform) # Output: Compose([Resize((224, 224)), ToTensor()]) - """ - if isinstance(dataloader, DataLoader): - if hasattr(dataloader.dataset, "transform"): - dataloader.dataset.transform = transform - elif isinstance(dataloader, Sequence): - for dl in dataloader: - set_dataloader_transform(dl, transform) - else: - msg = f"Unsupported dataloader type: {type(dataloader)}" - raise TypeError(msg) diff --git a/src/anomalib/deploy/utils.py b/src/anomalib/deploy/utils.py deleted file mode 100644 index 30131e82c9..0000000000 --- a/src/anomalib/deploy/utils.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Utility functions for Anomalib deployment module.""" - -# Copyright (C) 2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from torchvision.transforms.v2 import CenterCrop, Compose, Resize, Transform - -from anomalib.data.transforms import ExportableCenterCrop - - -def get_exportable_transform(transform: Transform | None) -> Transform | None: - """Get exportable transform. - - Some transforms are not supported by ONNX/OpenVINO, so we need to replace them with exportable versions. - """ - if transform is None: - return None - transform = disable_antialiasing(transform) - return convert_centercrop(transform) - - -def disable_antialiasing(transform: Transform) -> Transform: - """Disable antialiasing in Resize transforms. - - Resizing with antialiasing is not supported by ONNX, so we need to disable it. - """ - if isinstance(transform, Resize): - transform.antialias = False - if isinstance(transform, Compose): - for tr in transform.transforms: - disable_antialiasing(tr) - return transform - - -def convert_centercrop(transform: Transform) -> Transform: - """Convert CenterCrop to ExportableCenterCrop. - - Torchvision's CenterCrop is not supported by ONNX, so we need to replace it with our own ExportableCenterCrop. 
- """ - if isinstance(transform, CenterCrop): - transform = ExportableCenterCrop(size=transform.size) - if isinstance(transform, Compose): - for index in range(len(transform.transforms)): - tr = transform.transforms[index] - transform.transforms[index] = convert_centercrop(tr) - return transform diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 3f152e0cc9..1cbbdbb063 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -13,11 +13,11 @@ from torchvision.transforms.v2 import Transform from anomalib.data.dataclasses.torch.base import Batch -from anomalib.deploy.utils import get_exportable_transform from .utils.transform import ( get_dataloaders_transforms, get_datamodule_transforms, + get_exportable_transform, set_dataloaders_transforms, set_datamodule_transforms, ) diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py index 61d2317fc5..afff8344a9 100644 --- a/src/anomalib/pre_processing/utils/transform.py +++ b/src/anomalib/pre_processing/utils/transform.py @@ -6,56 +6,10 @@ from collections.abc import Sequence from torch.utils.data import DataLoader -from torchvision.transforms.v2 import Transform +from torchvision.transforms.v2 import CenterCrop, Compose, Resize, Transform from anomalib.data import AnomalibDataModule - - -def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: - """Set a transform for a specific stage in a AnomalibDataModule. - - Args: - datamodule: The AnomalibDataModule to set the transform for. - transform: The transform to set. - stage: The stage to set the transform for. - - Note: - The stage parameter maps to dataset attributes as follows: - - 'fit' -> 'train_data' - - 'validate' -> 'val_data' - - 'test' -> 'test_data' - - 'predict' -> 'test_data' - """ - stage_datasets = { - "fit": "train_data", - "validate": "val_data", - "test": "test_data", - "predict": "test_data", - } - - dataset_attr = stage_datasets.get(stage) - if dataset_attr and hasattr(datamodule, dataset_attr): - dataset = getattr(datamodule, dataset_attr) - if hasattr(dataset, "transform"): - dataset.transform = transform - - -def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: - """Set a transform for a dataloader or list of dataloaders. - - Args: - dataloader: The dataloader(s) to set the transform for. - transform: The transform to set. - """ - if isinstance(dataloader, DataLoader): - if hasattr(dataloader.dataset, "transform"): - dataloader.dataset.transform = transform - elif isinstance(dataloader, Sequence): - for dl in dataloader: - set_dataloader_transform(dl, transform) - else: - msg = f"Unsupported dataloader type: {type(dataloader)}" - raise TypeError(msg) +from anomalib.data.transforms import ExportableCenterCrop def get_stage_transform(stage: str, transforms: dict[str, Transform | None]) -> Transform | None: @@ -159,3 +113,88 @@ def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: di transform = transforms.get(stage_mapping[stage]) if transform is not None: set_dataloader_transform([loader], transform) + + +def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: + """Set a transform for a specific stage in a AnomalibDataModule. + + Args: + datamodule: The AnomalibDataModule to set the transform for. + transform: The transform to set. 
+ stage: The stage to set the transform for. + + Note: + The stage parameter maps to dataset attributes as follows: + - 'fit' -> 'train_data' + - 'validate' -> 'val_data' + - 'test' -> 'test_data' + - 'predict' -> 'test_data' + """ + stage_datasets = { + "fit": "train_data", + "validate": "val_data", + "test": "test_data", + "predict": "test_data", + } + + dataset_attr = stage_datasets.get(stage) + if dataset_attr and hasattr(datamodule, dataset_attr): + dataset = getattr(datamodule, dataset_attr) + if hasattr(dataset, "transform"): + dataset.transform = transform + + +def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: + """Set a transform for a dataloader or list of dataloaders. + + Args: + dataloader: The dataloader(s) to set the transform for. + transform: The transform to set. + """ + if isinstance(dataloader, DataLoader): + if hasattr(dataloader.dataset, "transform"): + dataloader.dataset.transform = transform + elif isinstance(dataloader, Sequence): + for dl in dataloader: + set_dataloader_transform(dl, transform) + else: + msg = f"Unsupported dataloader type: {type(dataloader)}" + raise TypeError(msg) + + +def get_exportable_transform(transform: Transform | None) -> Transform | None: + """Get exportable transform. + + Some transforms are not supported by ONNX/OpenVINO, so we need to replace them with exportable versions. + """ + if transform is None: + return None + transform = disable_antialiasing(transform) + return convert_centercrop(transform) + + +def disable_antialiasing(transform: Transform) -> Transform: + """Disable antialiasing in Resize transforms. + + Resizing with antialiasing is not supported by ONNX, so we need to disable it. + """ + if isinstance(transform, Resize): + transform.antialias = False + if isinstance(transform, Compose): + for tr in transform.transforms: + disable_antialiasing(tr) + return transform + + +def convert_centercrop(transform: Transform) -> Transform: + """Convert CenterCrop to ExportableCenterCrop. + + Torchvision's CenterCrop is not supported by ONNX, so we need to replace it with our own ExportableCenterCrop. + """ + if isinstance(transform, CenterCrop): + transform = ExportableCenterCrop(size=transform.size) + if isinstance(transform, Compose): + for index in range(len(transform.transforms)): + tr = transform.transforms[index] + transform.transforms[index] = convert_centercrop(tr) + return transform From 83c20846f544e3b575ceb03f82a8e579b72fbc96 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 24 Oct 2024 09:14:45 +0100 Subject: [PATCH 30/59] Revert v1 upgrade tool Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 2 +- tests/integration/tools/upgrade/expected_draem_v1.yaml | 6 ++++++ tools/upgrade/config.py | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 1cbbdbb063..3c0fd71637 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -169,7 +169,7 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non pl_module: The Lightning module. stage: The stage (e.g., 'fit', 'validate', 'test', 'predict'). 
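As a quick reference for the helpers relocated into anomalib/pre_processing/utils/transform.py in the hunk above, the following is a minimal sketch of how get_exportable_transform is expected to behave. It mirrors the unit tests added later in this series and assumes only the import path introduced here; it is an illustration, not part of any patch.

    from torchvision.transforms.v2 import CenterCrop, Compose, Resize

    from anomalib.pre_processing.utils.transform import get_exportable_transform

    # A transform pipeline using features that ONNX/OpenVINO export cannot handle directly.
    transform = Compose([Resize((256, 256), antialias=True), CenterCrop((224, 224))])

    exportable = get_exportable_transform(transform)

    # Resize antialiasing is disabled and CenterCrop is swapped for ExportableCenterCrop.
    # Note that the input transform is modified in place and then returned.
    assert exportable.transforms[0].antialias is False
    assert type(exportable.transforms[1]).__name__ == "ExportableCenterCrop"

Passing None simply returns None, which lets callers forward an optional transform without special-casing it.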
""" - stage = TrainerFn(stage).value # Convert string to TrainerFn enum + stage = TrainerFn(stage).value # Ensure stage is str if hasattr(trainer, "datamodule"): self.setup_transforms(datamodule=trainer.datamodule) diff --git a/tests/integration/tools/upgrade/expected_draem_v1.yaml b/tests/integration/tools/upgrade/expected_draem_v1.yaml index 7084d6e464..d4799adf98 100644 --- a/tests/integration/tools/upgrade/expected_draem_v1.yaml +++ b/tests/integration/tools/upgrade/expected_draem_v1.yaml @@ -3,10 +3,16 @@ data: init_args: root: ./datasets/MVTec category: bottle + image_size: + - 256 + - 256 train_batch_size: 72 eval_batch_size: 32 num_workers: 8 task: segmentation + transform: null + train_transform: null + eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/tools/upgrade/config.py b/tools/upgrade/config.py index 5f1f3278e1..71bf17a4b5 100644 --- a/tools/upgrade/config.py +++ b/tools/upgrade/config.py @@ -27,6 +27,7 @@ import yaml from anomalib.models import convert_snake_to_pascal_case +from anomalib.utils.config import to_tuple def get_class_signature(module_path: str, class_name: str) -> inspect.Signature: @@ -143,6 +144,9 @@ def upgrade_data_config(self) -> dict[str, Any]: self.old_config["dataset"], ) + # Input size is a list in the old config, convert it to a tuple + init_args["image_size"] = to_tuple(init_args["image_size"]) + return { "data": { "class_path": class_path, From e83f9cf614bf3ed135bc3323ff64c6935849808c Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 24 Oct 2024 11:20:12 +0100 Subject: [PATCH 31/59] Fix aupimo notebooks Signed-off-by: Samet Akcay --- notebooks/700_metrics/701a_aupimo.ipynb | 4 +--- notebooks/700_metrics/701b_aupimo_advanced_i.ipynb | 8 ++++---- notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb | 8 ++++---- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/notebooks/700_metrics/701a_aupimo.ipynb b/notebooks/700_metrics/701a_aupimo.ipynb index 5c5497b3b8..4fdb499ae7 100644 --- a/notebooks/700_metrics/701a_aupimo.ipynb +++ b/notebooks/700_metrics/701a_aupimo.ipynb @@ -357,9 +357,7 @@ ")\n", "\n", "for batch in predictions:\n", - " anomaly_maps = batch[\"anomaly_maps\"].squeeze(dim=1)\n", - " masks = batch[\"mask\"]\n", - " aupimo.update(anomaly_maps=anomaly_maps, masks=masks)" + " aupimo.update(anomaly_maps=batch.anomaly_map, masks=batch.gt_mask)" ] }, { diff --git a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb index a785075060..bf7f85f20c 100644 --- a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb +++ b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb @@ -219,10 +219,10 @@ "labels = []\n", "image_paths = []\n", "for batch in predictions:\n", - " anomaly_maps.append(batch_anomaly_maps := batch[\"anomaly_maps\"].squeeze(dim=1))\n", - " masks.append(batch_masks := batch[\"mask\"])\n", - " labels.append(batch[\"label\"])\n", - " image_paths.append(batch[\"image_path\"])\n", + " anomaly_maps.append(batch_anomaly_maps := batch.anomaly_map)\n", + " masks.append(batch_masks := batch.gt_mask)\n", + " labels.append(batch.gt_label)\n", + " image_paths.append(batch.image_path)\n", " aupimo.update(anomaly_maps=batch_anomaly_maps, masks=batch_masks)\n", "\n", "# list[list[str]] -> list[str]\n", diff --git a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb index ed647ef666..5eec1b1c8d 100644 --- a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb +++ 
b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb @@ -213,10 +213,10 @@ "labels = []\n", "image_paths = []\n", "for batch in predictions:\n", - " anomaly_maps.append(batch_anomaly_maps := batch[\"anomaly_maps\"].squeeze(dim=1))\n", - " masks.append(batch_masks := batch[\"mask\"])\n", - " labels.append(batch[\"label\"])\n", - " image_paths.append(batch[\"image_path\"])\n", + " anomaly_maps.append(batch_anomaly_maps := batch.anomaly_map)\n", + " masks.append(batch_masks := batch.gt_mask)\n", + " labels.append(batch.gt_label)\n", + " image_paths.append(batch.image_path)\n", " aupimo.update(anomaly_maps=batch_anomaly_maps, masks=batch_masks)\n", "\n", "# list[list[str]] -> list[str]\n", From 721e11f30851bb75ecf935c782331bc047dcacf4 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 24 Oct 2024 11:21:15 +0100 Subject: [PATCH 32/59] Add pre-processor unit tests Signed-off-by: Samet Akcay --- .../pre_processing/test_pre_processing.py | 97 +++++++++++++++++ .../pre_processing/utils/test_transform.py | 103 ++++++++++++++++++ 2 files changed, 200 insertions(+) create mode 100644 tests/unit/pre_processing/test_pre_processing.py create mode 100644 tests/unit/pre_processing/utils/test_transform.py diff --git a/tests/unit/pre_processing/test_pre_processing.py b/tests/unit/pre_processing/test_pre_processing.py new file mode 100644 index 0000000000..9f480620b8 --- /dev/null +++ b/tests/unit/pre_processing/test_pre_processing.py @@ -0,0 +1,97 @@ +"""Test the PreProcessor class.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import torch +from torchvision.transforms.v2 import Compose, Resize, ToDtype, ToImage +from torchvision.tv_tensors import Image, Mask + +from anomalib.data import ImageBatch +from anomalib.pre_processing import PreProcessor + + +class TestPreProcessor: + """Test the PreProcessor class.""" + + @pytest.fixture(autouse=True) + def setup(self) -> None: + """Set up test fixtures for each test method.""" + image = Image(torch.rand(3, 256, 256)) + gt_mask = Mask(torch.zeros(256, 256)) + self.dummy_batch = ImageBatch(image=image, gt_mask=gt_mask) + self.common_transform = Compose([Resize((224, 224)), ToImage(), ToDtype(torch.float32, scale=True)]) + + def test_init(self) -> None: + """Test the initialization of the PreProcessor class.""" + # Test with stage-specific transforms + train_transform = Compose([Resize((224, 224)), ToImage(), ToDtype(torch.float32, scale=True)]) + val_transform = Compose([Resize((256, 256)), ToImage(), ToDtype(torch.float32, scale=True)]) + pre_processor = PreProcessor(train_transform=train_transform, val_transform=val_transform) + assert pre_processor.train_transform == train_transform + assert pre_processor.val_transform == val_transform + assert pre_processor.test_transform is None + + # Test with single transform for all stages + pre_processor = PreProcessor(transform=self.common_transform) + assert pre_processor.train_transform == self.common_transform + assert pre_processor.val_transform == self.common_transform + assert pre_processor.test_transform == self.common_transform + + # Test error case: both transform and stage-specific transform + with pytest.raises(ValueError, match="`transforms` cannot be used together with"): + PreProcessor(transform=self.common_transform, train_transform=train_transform) + + def test_forward(self) -> None: + """Test the forward method of the PreProcessor class.""" + pre_processor = PreProcessor(transform=self.common_transform) + processed_batch = 
pre_processor(self.dummy_batch.image) + assert processed_batch.shape == (1, 3, 224, 224) + + @pytest.mark.parametrize( + "method_name", + ["on_train_batch_start", "on_validation_batch_start", "on_test_batch_start", "on_predict_batch_start"], + ) + def test_callbacks(self, method_name: str) -> None: + """Test the callbacks of the PreProcessor class.""" + pre_processor = PreProcessor(transform=self.common_transform) + method = getattr(pre_processor, method_name) + method(None, None, self.dummy_batch, 0) + assert self.dummy_batch.image.shape == (1, 3, 224, 224) + assert self.dummy_batch.gt_mask.shape == (1, 224, 224) + + def test_no_transform(self) -> None: + """Test no transform.""" + pre_processor = PreProcessor() + processed_batch = pre_processor(self.dummy_batch) + assert processed_batch.image.shape == (1, 3, 256, 256) + assert processed_batch.gt_mask.shape == (1, 256, 256) + + @staticmethod + def test_different_stage_transforms() -> None: + """Test different stage transforms.""" + train_transform = Compose([Resize((224, 224)), ToImage(), ToDtype(torch.float32, scale=True)]) + val_transform = Compose([Resize((256, 256)), ToImage(), ToDtype(torch.float32, scale=True)]) + test_transform = Compose([Resize((288, 288)), ToImage(), ToDtype(torch.float32, scale=True)]) + + pre_processor = PreProcessor( + train_transform=train_transform, + val_transform=val_transform, + test_transform=test_transform, + ) + + # Test train transform + test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) + pre_processor.on_train_batch_start(None, None, test_batch, 0) + assert test_batch.image.shape == (1, 3, 224, 224) + + # Test validation transform + test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) + pre_processor.on_validation_batch_start(None, None, test_batch, 0) + assert test_batch.image.shape == (1, 3, 256, 256) + + # Test test transform + test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) + pre_processor.on_test_batch_start(None, None, test_batch, 0) + assert test_batch.image.shape == (1, 3, 288, 288) diff --git a/tests/unit/pre_processing/utils/test_transform.py b/tests/unit/pre_processing/utils/test_transform.py new file mode 100644 index 0000000000..159cd55b83 --- /dev/null +++ b/tests/unit/pre_processing/utils/test_transform.py @@ -0,0 +1,103 @@ +"""Test the pre-processing transforms utils.""" + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import torch +from torch.utils.data import DataLoader, TensorDataset +from torchvision.transforms.v2 import CenterCrop, Compose, Resize, ToTensor + +from anomalib.data.transforms import ExportableCenterCrop +from anomalib.pre_processing.utils.transform import ( + convert_centercrop, + disable_antialiasing, + get_exportable_transform, + set_dataloader_transform, +) + + +def test_set_dataloader_transform() -> None: + """Test the set_dataloader_transform function.""" + + # Test with single DataLoader + class TransformableDataset(TensorDataset): + def __init__(self, *tensors) -> None: + super().__init__(*tensors) + self.transform = None + + dataset = TransformableDataset(torch.randn(10, 3, 224, 224)) + dataloader = DataLoader(dataset) + transform = ToTensor() + set_dataloader_transform(dataloader, transform) + assert dataloader.dataset.transform == transform + + # Test with sequence of DataLoaders + dataloaders = [DataLoader(TransformableDataset(torch.randn(10, 3, 224, 224))) for _ 
in range(3)] + set_dataloader_transform(dataloaders, transform) + for dl in dataloaders: + assert dl.dataset.transform == transform + + # Test with unsupported type + with pytest.raises(TypeError): + set_dataloader_transform({"key": "value"}, transform) + + +def test_get_exportable_transform() -> None: + """Test the get_exportable_transform function.""" + # Test with None transform + assert get_exportable_transform(None) is None + + # Test with Resize transform + resize = Resize((224, 224), antialias=True) + exportable_resize = get_exportable_transform(resize) + assert isinstance(exportable_resize, Resize) + assert not exportable_resize.antialias + + # Test with CenterCrop transform + center_crop = CenterCrop((224, 224)) + exportable_center_crop = get_exportable_transform(center_crop) + assert isinstance(exportable_center_crop, ExportableCenterCrop) + + # Test with Compose transform + compose = Compose([Resize((224, 224), antialias=True), CenterCrop((200, 200))]) + exportable_compose = get_exportable_transform(compose) + assert isinstance(exportable_compose, Compose) + assert isinstance(exportable_compose.transforms[0], Resize) + assert not exportable_compose.transforms[0].antialias + assert isinstance(exportable_compose.transforms[1], ExportableCenterCrop) + + +def test_disable_antialiasing() -> None: + """Test the disable_antialiasing function.""" + # Test with Resize transform + resize = Resize((224, 224), antialias=True) + disabled_resize = disable_antialiasing(resize) + assert not disabled_resize.antialias + + # Test with Compose transform + compose = Compose([Resize((224, 224), antialias=True), ToTensor()]) + disabled_compose = disable_antialiasing(compose) + assert not disabled_compose.transforms[0].antialias + + # Test with non-Resize transform + to_tensor = ToTensor() + assert disable_antialiasing(to_tensor) == to_tensor + + +def test_convert_centercrop() -> None: + """Test the convert_centercrop function.""" + # Test with CenterCrop transform + center_crop = CenterCrop((224, 224)) + converted_crop = convert_centercrop(center_crop) + assert isinstance(converted_crop, ExportableCenterCrop) + assert converted_crop.size == list(center_crop.size) + + # Test with Compose transform + compose = Compose([Resize((256, 256)), CenterCrop((224, 224))]) + converted_compose = convert_centercrop(compose) + assert isinstance(converted_compose.transforms[1], ExportableCenterCrop) + + # Test with non-CenterCrop transform + resize = Resize((224, 224)) + assert convert_centercrop(resize) == resize From f937abfbe54eb6f4ea88c2d56dcc5f281021ad52 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 24 Oct 2024 17:36:02 +0100 Subject: [PATCH 33/59] Increase the test coverage for PreProcessor Signed-off-by: Samet Akcay --- .../pre_processing/test_pre_processing.py | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tests/unit/pre_processing/test_pre_processing.py b/tests/unit/pre_processing/test_pre_processing.py index 9f480620b8..6cc2a3d3b9 100644 --- a/tests/unit/pre_processing/test_pre_processing.py +++ b/tests/unit/pre_processing/test_pre_processing.py @@ -3,8 +3,11 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +from unittest.mock import MagicMock + import pytest import torch +from torch.utils.data import DataLoader from torchvision.transforms.v2 import Compose, Resize, ToDtype, ToImage from torchvision.tv_tensors import Image, Mask @@ -95,3 +98,52 @@ def test_different_stage_transforms() -> None: test_batch = ImageBatch(image=Image(torch.rand(3, 
256, 256)), gt_mask=Mask(torch.zeros(256, 256))) pre_processor.on_test_batch_start(None, None, test_batch, 0) assert test_batch.image.shape == (1, 3, 288, 288) + + def test_setup_transforms_from_datamodule(self) -> None: + """Test setup method when transforms are obtained from datamodule.""" + # Mock datamodule with transforms + datamodule = MagicMock() + datamodule.train_transform = self.common_transform + datamodule.eval_transform = self.common_transform + + pre_processor = PreProcessor() + pre_processor.setup_transforms(datamodule=datamodule) + + assert pre_processor.train_transform == self.common_transform + assert pre_processor.val_transform == self.common_transform + assert pre_processor.test_transform == self.common_transform + + def test_setup_transforms_from_dataloaders(self) -> None: + """Test setup method when transforms are obtained from dataloaders.""" + # Mock dataloader with dataset having a transform + dataloader = MagicMock() + dataloader.dataset.transform = self.common_transform + + pre_processor = PreProcessor() + pre_processor.setup_transforms(dataloaders=[dataloader]) + + assert pre_processor.train_transform == self.common_transform + assert pre_processor.val_transform == self.common_transform + assert pre_processor.test_transform == self.common_transform + + def test_setup_transforms_priority(self) -> None: + """Test setup method prioritizes PreProcessor transforms over datamodule/dataloaders.""" + # Mock datamodule + datamodule = MagicMock() + datamodule.train_transform = Compose([Resize((128, 128)), ToImage(), ToDtype(torch.float32, scale=True)]) + datamodule.eval_transform = Compose([Resize((128, 128)), ToImage(), ToDtype(torch.float32, scale=True)]) + + # Mock dataloader + dataset_mock = MagicMock() + dataset_mock.transform = Compose([Resize((64, 64)), ToImage(), ToDtype(torch.float32, scale=True)]) + dataloader = MagicMock(spec=DataLoader) + dataloader.dataset = dataset_mock + + # Initialize PreProcessor with a custom transform + pre_processor = PreProcessor(transform=self.common_transform) + pre_processor.setup_transforms(datamodule=datamodule, dataloaders=[dataloader]) + + # Ensure PreProcessor's own transform is used + assert pre_processor.train_transform == self.common_transform + assert pre_processor.val_transform == self.common_transform + assert pre_processor.test_transform == self.common_transform From ec3e97c7ec622298f547a75bf7c80da86f162756 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:00:15 +0100 Subject: [PATCH 34/59] Add option to disable pre-processor Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 15 +++++++++++++-- src/anomalib/models/image/cfa/lightning_model.py | 2 +- .../models/image/cflow/lightning_model.py | 2 +- .../models/image/csflow/lightning_model.py | 2 +- .../models/image/dfkde/lightning_model.py | 2 +- src/anomalib/models/image/dfm/lightning_model.py | 2 +- .../models/image/draem/lightning_model.py | 2 +- src/anomalib/models/image/dsr/lightning_model.py | 2 +- .../models/image/efficient_ad/lightning_model.py | 2 +- .../models/image/fastflow/lightning_model.py | 2 +- src/anomalib/models/image/fre/lightning_model.py | 2 +- .../models/image/ganomaly/lightning_model.py | 2 +- .../models/image/padim/lightning_model.py | 2 +- .../models/image/patchcore/lightning_model.py | 2 +- .../image/reverse_distillation/lightning_model.py | 2 +- src/anomalib/models/image/rkde/lightning_model.py | 2 +- .../models/image/stfpm/lightning_model.py | 2 +- .../models/image/uflow/lightning_model.py | 2 +- 
.../models/image/winclip/lightning_model.py | 2 +- .../models/video/ai_vad/lightning_model.py | 2 +- 20 files changed, 32 insertions(+), 21 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index bffc99ff03..0455bbea17 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -41,7 +41,7 @@ class AnomalyModule(ExportMixin, pl.LightningModule, ABC): def __init__( self, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, post_processor: PostProcessor | None = None, ) -> None: super().__init__() @@ -55,7 +55,18 @@ def __init__( self.image_metrics: AnomalibMetricCollection self.pixel_metrics: AnomalibMetricCollection - self.pre_processor = pre_processor or self.configure_pre_processor() + # Handle pre-processor + # True -> use default pre-processor + # False -> no pre-processor + # PreProcessor -> use the provided pre-processor + if isinstance(pre_processor, PreProcessor): + self.pre_processor = pre_processor + elif isinstance(pre_processor, bool): + self.pre_processor = self.configure_pre_processor() + else: + msg = f"Invalid pre-processor type: {type(pre_processor)}" + raise TypeError(msg) + self.post_processor = post_processor or self.default_post_processor() self._input_size: tuple[int, int] | None = None diff --git a/src/anomalib/models/image/cfa/lightning_model.py b/src/anomalib/models/image/cfa/lightning_model.py index 9f8d5818b8..a42b6a172f 100644 --- a/src/anomalib/models/image/cfa/lightning_model.py +++ b/src/anomalib/models/image/cfa/lightning_model.py @@ -56,7 +56,7 @@ def __init__( num_nearest_neighbors: int = 3, num_hard_negative_features: int = 3, radius: float = 1e-5, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) self.model: CfaModel = CfaModel( diff --git a/src/anomalib/models/image/cflow/lightning_model.py b/src/anomalib/models/image/cflow/lightning_model.py index 9995b747b7..5b863a85c3 100644 --- a/src/anomalib/models/image/cflow/lightning_model.py +++ b/src/anomalib/models/image/cflow/lightning_model.py @@ -58,7 +58,7 @@ class Cflow(AnomalyModule): def __init__( self, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, backbone: str = "wide_resnet50_2", layers: Sequence[str] = ("layer2", "layer3", "layer4"), pre_trained: bool = True, diff --git a/src/anomalib/models/image/csflow/lightning_model.py b/src/anomalib/models/image/csflow/lightning_model.py index 00f7eb2dea..fd145ef2e8 100644 --- a/src/anomalib/models/image/csflow/lightning_model.py +++ b/src/anomalib/models/image/csflow/lightning_model.py @@ -45,7 +45,7 @@ def __init__( n_coupling_blocks: int = 4, clamp: int = 3, num_channels: int = 3, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/dfkde/lightning_model.py b/src/anomalib/models/image/dfkde/lightning_model.py index 13143d1b1b..94a3e5eae8 100644 --- a/src/anomalib/models/image/dfkde/lightning_model.py +++ b/src/anomalib/models/image/dfkde/lightning_model.py @@ -47,7 +47,7 @@ def __init__( n_pca_components: int = 16, feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE, max_training_points: int = 40000, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | 
bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/dfm/lightning_model.py b/src/anomalib/models/image/dfm/lightning_model.py index 104a8221fe..44f5169e58 100644 --- a/src/anomalib/models/image/dfm/lightning_model.py +++ b/src/anomalib/models/image/dfm/lightning_model.py @@ -51,7 +51,7 @@ def __init__( pooling_kernel_size: int = 4, pca_level: float = 0.97, score_type: str = "fre", - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/draem/lightning_model.py b/src/anomalib/models/image/draem/lightning_model.py index 8c70f7e287..205203300c 100644 --- a/src/anomalib/models/image/draem/lightning_model.py +++ b/src/anomalib/models/image/draem/lightning_model.py @@ -48,7 +48,7 @@ def __init__( sspcab_lambda: float = 0.1, anomaly_source_path: str | None = None, beta: float | tuple[float, float] = (0.1, 1.0), - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/dsr/lightning_model.py b/src/anomalib/models/image/dsr/lightning_model.py index 0c3bb46171..d1858b7f7e 100644 --- a/src/anomalib/models/image/dsr/lightning_model.py +++ b/src/anomalib/models/image/dsr/lightning_model.py @@ -50,7 +50,7 @@ def __init__( self, latent_anomaly_strength: float = 0.2, upsampling_train_ratio: float = 0.7, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index b5a7c6a809..e6a10d9195 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -73,7 +73,7 @@ def __init__( weight_decay: float = 0.00001, padding: bool = False, pad_maps: bool = True, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/fastflow/lightning_model.py b/src/anomalib/models/image/fastflow/lightning_model.py index f48992d156..9d25748551 100644 --- a/src/anomalib/models/image/fastflow/lightning_model.py +++ b/src/anomalib/models/image/fastflow/lightning_model.py @@ -47,7 +47,7 @@ def __init__( flow_steps: int = 8, conv3x3_only: bool = False, hidden_ratio: float = 1.0, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/fre/lightning_model.py b/src/anomalib/models/image/fre/lightning_model.py index c88a0f4de4..3265a96ede 100755 --- a/src/anomalib/models/image/fre/lightning_model.py +++ b/src/anomalib/models/image/fre/lightning_model.py @@ -53,7 +53,7 @@ def __init__( pooling_kernel_size: int = 2, input_dim: int = 65536, latent_dim: int = 220, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/ganomaly/lightning_model.py b/src/anomalib/models/image/ganomaly/lightning_model.py index 84fd95738f..04107e4e08 100644 --- a/src/anomalib/models/image/ganomaly/lightning_model.py +++ b/src/anomalib/models/image/ganomaly/lightning_model.py @@ 
-68,7 +68,7 @@ def __init__( lr: float = 0.0002, beta1: float = 0.5, beta2: float = 0.999, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/padim/lightning_model.py b/src/anomalib/models/image/padim/lightning_model.py index c81438c542..e5d8786f38 100644 --- a/src/anomalib/models/image/padim/lightning_model.py +++ b/src/anomalib/models/image/padim/lightning_model.py @@ -48,7 +48,7 @@ def __init__( layers: list[str] = ["layer1", "layer2", "layer3"], # noqa: B006 pre_trained: bool = True, n_features: int | None = None, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/patchcore/lightning_model.py b/src/anomalib/models/image/patchcore/lightning_model.py index f788870b43..6fdd2564ed 100644 --- a/src/anomalib/models/image/patchcore/lightning_model.py +++ b/src/anomalib/models/image/patchcore/lightning_model.py @@ -51,7 +51,7 @@ def __init__( pre_trained: bool = True, coreset_sampling_ratio: float = 0.1, num_neighbors: int = 9, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/reverse_distillation/lightning_model.py b/src/anomalib/models/image/reverse_distillation/lightning_model.py index 5c7d63f598..0899b8979f 100644 --- a/src/anomalib/models/image/reverse_distillation/lightning_model.py +++ b/src/anomalib/models/image/reverse_distillation/lightning_model.py @@ -45,7 +45,7 @@ def __init__( layers: Sequence[str] = ("layer1", "layer2", "layer3"), anomaly_map_mode: AnomalyMapGenerationMode = AnomalyMapGenerationMode.ADD, pre_trained: bool = True, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/rkde/lightning_model.py b/src/anomalib/models/image/rkde/lightning_model.py index 171b7a48e3..26b1efd8b7 100644 --- a/src/anomalib/models/image/rkde/lightning_model.py +++ b/src/anomalib/models/image/rkde/lightning_model.py @@ -61,7 +61,7 @@ def __init__( n_pca_components: int = 16, feature_scaling_method: FeatureScalingMethod = FeatureScalingMethod.SCALE, max_training_points: int = 40000, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/stfpm/lightning_model.py b/src/anomalib/models/image/stfpm/lightning_model.py index a13b0519b3..5ef7427ed6 100644 --- a/src/anomalib/models/image/stfpm/lightning_model.py +++ b/src/anomalib/models/image/stfpm/lightning_model.py @@ -41,7 +41,7 @@ def __init__( self, backbone: str = "resnet18", layers: Sequence[str] = ("layer1", "layer2", "layer3"), - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) diff --git a/src/anomalib/models/image/uflow/lightning_model.py b/src/anomalib/models/image/uflow/lightning_model.py index a0c2034c36..b820a664b6 100644 --- a/src/anomalib/models/image/uflow/lightning_model.py +++ b/src/anomalib/models/image/uflow/lightning_model.py @@ -38,7 +38,7 @@ def __init__( affine_clamp: float = 2.0, affine_subnet_channels_ratio: float = 1.0, permute_soft: bool = False, - pre_processor: 
PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: """Uflow model. diff --git a/src/anomalib/models/image/winclip/lightning_model.py b/src/anomalib/models/image/winclip/lightning_model.py index 50dd43e78a..9e41c690af 100644 --- a/src/anomalib/models/image/winclip/lightning_model.py +++ b/src/anomalib/models/image/winclip/lightning_model.py @@ -54,7 +54,7 @@ def __init__( k_shot: int = 0, scales: tuple = (2, 3), few_shot_source: Path | str | None = None, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) self.model = WinClipModel(scales=scales, apply_transform=False) diff --git a/src/anomalib/models/video/ai_vad/lightning_model.py b/src/anomalib/models/video/ai_vad/lightning_model.py index 7d59307a38..7d824ad9ba 100644 --- a/src/anomalib/models/video/ai_vad/lightning_model.py +++ b/src/anomalib/models/video/ai_vad/lightning_model.py @@ -80,7 +80,7 @@ def __init__( n_components_velocity: int = 2, n_neighbors_pose: int = 1, n_neighbors_deep: int = 1, - pre_processor: PreProcessor | None = None, + pre_processor: PreProcessor | bool = True, ) -> None: super().__init__(pre_processor=pre_processor) From 9b42d5d5ac8047cfa969e23c28b2a7cd6a120e30 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:01:28 +0100 Subject: [PATCH 35/59] Split setup_transforms to setup_datamodule_transforms and setup_dataloader_transforms Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 46 +++++++++---------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 3c0fd71637..17fb0234a7 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -109,35 +109,18 @@ def __init__( self.train_transform = train_transform or transform self.val_transform = val_transform or transform self.test_transform = test_transform or transform - self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) - def setup_transforms( - self, - datamodule: "AnomalibDataModule | None" = None, - dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS | None" = None, - ) -> None: - """Set up and propagate transforms according to priority rules. - - Args: - datamodule: DataModule that might contain transforms. - dataloaders: Dataloaders that might contain transforms. 
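[PATCH 34/59] above widens the model constructors from ``pre_processor: PreProcessor | None = None`` to ``pre_processor: PreProcessor | bool = True``. A minimal sketch of the two supported call patterns, using Padim purely as an example (any of the models touched in that patch behaves the same way); this is an illustration under those assumptions, not part of the patches:

    import torch
    from torchvision.transforms.v2 import Compose, Resize, ToDtype, ToImage

    from anomalib.models import Padim
    from anomalib.pre_processing import PreProcessor

    # pre_processor=True (the default) lets the model build its own default
    # PreProcessor via configure_pre_processor().
    model = Padim()

    # Passing a PreProcessor instance overrides the default transforms explicitly.
    pre_processor = PreProcessor(
        transform=Compose([Resize((256, 256), antialias=True), ToImage(), ToDtype(torch.float32, scale=True)]),
    )
    model = Padim(pre_processor=pre_processor)

Any other type raises a TypeError in AnomalyModule.__init__, as shown in the anomaly_module.py hunk above.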
- """ - if isinstance(dataloaders, DataLoader): - dataloaders = [dataloaders] - - # If PreProcessor has transforms, propagate them to datamodule or dataloaders + def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: + """Set up datamodule transforms.""" + # If PreProcessor has transforms, propagate them to datamodule if any([self.train_transform, self.val_transform, self.test_transform]): transforms = { "train": self.train_transform, "val": self.val_transform, "test": self.test_transform, } - - if datamodule: - set_datamodule_transforms(datamodule, transforms) - if dataloaders: - set_dataloaders_transforms(dataloaders, transforms) + set_datamodule_transforms(datamodule, transforms) return # Try to get transforms from datamodule @@ -147,10 +130,24 @@ def setup_transforms( self.train_transform = datamodule_transforms.get("train") self.val_transform = datamodule_transforms.get("val") self.test_transform = datamodule_transforms.get("test") - self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) return + def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS") -> None: + """Set up dataloader transforms.""" + if isinstance(dataloaders, DataLoader): + dataloaders = [dataloaders] + + # If PreProcessor has transforms, propagate them to dataloaders + if any([self.train_transform, self.val_transform, self.test_transform]): + transforms = { + "train": self.train_transform, + "val": self.val_transform, + "test": self.test_transform, + } + set_dataloaders_transforms(dataloaders, transforms) + return + # Try to get transforms from dataloaders if dataloaders: dataloaders_transforms = get_dataloaders_transforms(dataloaders) @@ -158,7 +155,6 @@ def setup_transforms( self.train_transform = dataloaders_transforms.get("train") self.val_transform = dataloaders_transforms.get("val") self.test_transform = dataloaders_transforms.get("test") - self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: @@ -172,10 +168,10 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> Non stage = TrainerFn(stage).value # Ensure stage is str if hasattr(trainer, "datamodule"): - self.setup_transforms(datamodule=trainer.datamodule) + self.setup_datamodule_transforms(datamodule=trainer.datamodule) elif hasattr(trainer, f"{stage}_dataloaders"): dataloaders = getattr(trainer, f"{stage}_dataloaders") - self.setup_transforms(dataloaders=dataloaders) + self.setup_dataloader_transforms(dataloaders=dataloaders) super().setup(trainer, pl_module, stage) From 8c379c017efa56209c9f5b0c3eab5f6c1cb7467d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:03:03 +0100 Subject: [PATCH 36/59] Replace batch.update with in-place batch transforms Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 17fb0234a7..059a82fc7b 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -194,8 +194,7 @@ def on_train_batch_start( """Apply transforms to the training batch.""" del trainer, pl_module, batch_idx # Unused parameters if self.train_transform: - image, gt_mask = self.train_transform(batch.image, 
batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + batch.image, batch.gt_mask = self.train_transform(batch.image, batch.gt_mask) def on_validation_batch_start( self, @@ -208,8 +207,7 @@ def on_validation_batch_start( """Apply transforms to the validation batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters if self.val_transform: - image, gt_mask = self.val_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + batch.image, batch.gt_mask = self.val_transform(batch.image, batch.gt_mask) def on_test_batch_start( self, @@ -222,8 +220,7 @@ def on_test_batch_start( """Apply transforms to the test batch.""" del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters if self.test_transform: - image, gt_mask = self.test_transform(batch.image, batch.gt_mask) - batch.update(image=image, gt_mask=gt_mask) + batch.image, batch.gt_mask = self.test_transform(batch.image, batch.gt_mask) def on_predict_batch_start( self, From db1d543a050ddb9948937e51b9b0e93f42722414 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:04:05 +0100 Subject: [PATCH 37/59] Remove logger.warning when the default pre-processor is used Signed-off-by: Samet Akcay --- src/anomalib/models/components/base/anomaly_module.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 0455bbea17..54c21b5c02 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -204,11 +204,6 @@ def configure_pre_processor(self, image_size: tuple[int, int] | None = None) -> The default pre-processor is resize to 256x256 and normalize to ImageNet stats. Individual models can override this method to provide custom transforms and pre-processing pipelines. """ - logger.warning( - "No implementation of `configure_pre_processor` was provided in the Lightning model. Using default " - "transforms from the base class. This may not be suitable for your use case. 
Please override " - "`configure_pre_processor` in your model.", - ) image_size = image_size or (256, 256) return PreProcessor( transform=Compose([ From 9ec2547f4cca41e6b7998168787fd1c515db195d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:13:09 +0100 Subject: [PATCH 38/59] Use predict-transforms explicitly Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 059a82fc7b..dbee50f62e 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -109,6 +109,7 @@ def __init__( self.train_transform = train_transform or transform self.val_transform = val_transform or transform self.test_transform = test_transform or transform + self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: @@ -130,6 +131,7 @@ def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: self.train_transform = datamodule_transforms.get("train") self.val_transform = datamodule_transforms.get("val") self.test_transform = datamodule_transforms.get("test") + self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) return @@ -155,6 +157,7 @@ def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DAT self.train_transform = dataloaders_transforms.get("train") self.val_transform = dataloaders_transforms.get("val") self.test_transform = dataloaders_transforms.get("test") + self.predict_transform = self.test_transform self.exportable_transform = get_exportable_transform(self.test_transform) def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: @@ -231,4 +234,6 @@ def on_predict_batch_start( dataloader_idx: int = 0, ) -> None: """Apply transforms to the predict batch, which is the same as test batch.""" - self.on_test_batch_start(trainer, pl_module, batch, batch_idx, dataloader_idx) + del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters + if self.predict_transform: + batch.image, batch.gt_mask = self.predict_transform(batch.image, batch.gt_mask) From ba240be300cccf1d09c4b082c9fcdc50c1461268 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Fri, 25 Oct 2024 17:18:37 +0100 Subject: [PATCH 39/59] remove pre-processor and configure_transforms from export mixin Signed-off-by: Samet Akcay --- src/anomalib/models/components/base/export_mixin.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/anomalib/models/components/base/export_mixin.py b/src/anomalib/models/components/base/export_mixin.py index 22beae0774..14b5e5def0 100644 --- a/src/anomalib/models/components/base/export_mixin.py +++ b/src/anomalib/models/components/base/export_mixin.py @@ -5,7 +5,7 @@ import json import logging -from collections.abc import Callable, Iterable +from collections.abc import Iterable from pathlib import Path from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Any @@ -21,7 +21,6 @@ from anomalib.data import AnomalibDataModule from anomalib.deploy.export import CompressionType, ExportType from anomalib.metrics import create_metric_collection -from anomalib.pre_processing import PreProcessor if TYPE_CHECKING: from importlib.util import find_spec @@ -38,8 
+37,6 @@ class ExportMixin: """This mixin allows exporting models to torch and ONNX/OpenVINO.""" model: nn.Module - pre_processor: PreProcessor - configure_pre_processor: Callable device: torch.device def to_torch( From 6ea536966c0a077c79ab87c3699db802b2a02bf7 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Sat, 26 Oct 2024 07:39:56 +0100 Subject: [PATCH 40/59] Rename set_datamodule_transform to set_datamodule_stage_transform Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/utils/transform.py | 10 +++++----- tests/unit/pre_processing/utils/test_transform.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py index afff8344a9..09538d03e0 100644 --- a/src/anomalib/pre_processing/utils/transform.py +++ b/src/anomalib/pre_processing/utils/transform.py @@ -87,7 +87,7 @@ def set_datamodule_transforms(datamodule: AnomalibDataModule, transforms: dict[s for stage in ["fit", "validate", "test", "predict"]: transform = get_stage_transform(stage, transforms) if transform is not None: - set_datamodule_transform(datamodule, transform, stage) + set_datamodule_stage_transform(datamodule, transform, stage) def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: dict[str, Transform | None]) -> None: @@ -115,7 +115,7 @@ def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: di set_dataloader_transform([loader], transform) -def set_datamodule_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: +def set_datamodule_stage_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: """Set a transform for a specific stage in a AnomalibDataModule. Args: @@ -170,7 +170,7 @@ def get_exportable_transform(transform: Transform | None) -> Transform | None: if transform is None: return None transform = disable_antialiasing(transform) - return convert_centercrop(transform) + return convert_center_crop_transform(transform) def disable_antialiasing(transform: Transform) -> Transform: @@ -186,7 +186,7 @@ def disable_antialiasing(transform: Transform) -> Transform: return transform -def convert_centercrop(transform: Transform) -> Transform: +def convert_center_crop_transform(transform: Transform) -> Transform: """Convert CenterCrop to ExportableCenterCrop. Torchvision's CenterCrop is not supported by ONNX, so we need to replace it with our own ExportableCenterCrop. 
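The stage-aware setter renamed above plays the role of the helper that previously lived in anomalib/data/utils/transform.py, but it is keyed on Trainer stage names rather than dataset split names. A minimal sketch of its use, adapted from the doctest removed earlier in this series (the dataset root is a placeholder path):

    import torch
    from torchvision.transforms.v2 import Compose, Resize, ToDtype, ToImage

    from anomalib.data import MVTec
    from anomalib.pre_processing.utils.transform import set_datamodule_stage_transform

    datamodule = MVTec(root="./datasets/MVTec", category="bottle")
    datamodule.setup()

    transform = Compose([Resize((224, 224)), ToImage(), ToDtype(torch.float32, scale=True)])

    # Trainer stages map to dataset attributes: "fit" -> train_data, "validate" -> val_data,
    # "test"/"predict" -> test_data. The dataset-level transform is overwritten in place.
    set_datamodule_stage_transform(datamodule, transform, "fit")
    print(datamodule.train_data.transform)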
@@ -196,5 +196,5 @@ def convert_centercrop(transform: Transform) -> Transform: if isinstance(transform, Compose): for index in range(len(transform.transforms)): tr = transform.transforms[index] - transform.transforms[index] = convert_centercrop(tr) + transform.transforms[index] = convert_center_crop_transform(tr) return transform diff --git a/tests/unit/pre_processing/utils/test_transform.py b/tests/unit/pre_processing/utils/test_transform.py index 159cd55b83..6974bcdbc8 100644 --- a/tests/unit/pre_processing/utils/test_transform.py +++ b/tests/unit/pre_processing/utils/test_transform.py @@ -10,7 +10,7 @@ from anomalib.data.transforms import ExportableCenterCrop from anomalib.pre_processing.utils.transform import ( - convert_centercrop, + convert_center_crop_transform, disable_antialiasing, get_exportable_transform, set_dataloader_transform, @@ -89,15 +89,15 @@ def test_convert_centercrop() -> None: """Test the convert_centercrop function.""" # Test with CenterCrop transform center_crop = CenterCrop((224, 224)) - converted_crop = convert_centercrop(center_crop) + converted_crop = convert_center_crop_transform(center_crop) assert isinstance(converted_crop, ExportableCenterCrop) assert converted_crop.size == list(center_crop.size) # Test with Compose transform compose = Compose([Resize((256, 256)), CenterCrop((224, 224))]) - converted_compose = convert_centercrop(compose) + converted_compose = convert_center_crop_transform(compose) assert isinstance(converted_compose.transforms[1], ExportableCenterCrop) # Test with non-CenterCrop transform resize = Resize((224, 224)) - assert convert_centercrop(resize) == resize + assert convert_center_crop_transform(resize) == resize From c71e41c4b058a603620104c2a137c60fed558512 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 13:29:43 +0000 Subject: [PATCH 41/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 72 ------------------- .../data/datamodules/depth/folder_3d.py | 18 ----- .../data/datamodules/depth/mvtec_3d.py | 26 +------ src/anomalib/data/datamodules/image/btech.py | 29 +------- src/anomalib/data/datamodules/image/folder.py | 20 ------ .../data/datamodules/image/kolektor.py | 26 +------ src/anomalib/data/datamodules/image/mvtec.py | 30 +------- src/anomalib/data/datamodules/image/visa.py | 26 +------ src/anomalib/data/datamodules/video/avenue.py | 25 +------ .../data/datamodules/video/shanghaitech.py | 25 +------ .../data/datamodules/video/ucsd_ped.py | 18 ----- 11 files changed, 7 insertions(+), 308 deletions(-) diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index 8869b38ce3..f083de4ac9 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -40,14 +40,6 @@ class AnomalibDataModule(LightningDataModule, ABC): Defaults to ``None``. test_split_ratio (float): Fraction of the train images held out for testing. Defaults to ``None``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. 
seed (int | None, optional): Seed used during random subset splitting. Defaults to ``None``. """ @@ -61,10 +53,6 @@ def __init__( val_split_ratio: float, test_split_mode: TestSplitMode | str | None = None, test_split_ratio: float | None = None, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, seed: int | None = None, ) -> None: super().__init__() @@ -75,18 +63,8 @@ def __init__( self.test_split_ratio = test_split_ratio self.val_split_mode = ValSplitMode(val_split_mode) self.val_split_ratio = val_split_ratio - self.image_size = image_size self.seed = seed - # set transforms - if bool(train_transform) != bool(eval_transform): - msg = "Only one of train_transform and eval_transform was specified. This is not recommended because \ - it could lead to unexpected behaviour. Please ensure training and eval transforms have the same \ - reshape and normalization characteristics." - logger.warning(msg) - self._train_transform = train_transform or transform - self._eval_transform = eval_transform or transform - self.train_data: AnomalibDataset self.val_data: AnomalibDataset self.test_data: AnomalibDataset @@ -228,56 +206,6 @@ def predict_dataloader(self) -> EVAL_DATALOADERS: """Use the test dataloader for inference unless overridden.""" return self.test_dataloader() - @property - def transform(self) -> Transform: - """Property that returns the user-specified transform for the datamodule, if any. - - This property is accessed by the engine to set the transform for the model. The eval_transform takes precedence - over the train_transform, because the transform that we store in the model is the one that should be used during - inference. - """ - if self._eval_transform: - return self._eval_transform - return None - - @property - def train_transform(self) -> Transform: - """Get the transforms that will be passed to the train dataset. - - If the train_transform is not set, the engine will request the transform from the model. - """ - if self._train_transform: - return self._train_transform - if ( - getattr(self, "trainer", None) - and self.trainer.lightning_module - and hasattr(self.trainer.lightning_module, "pre_processor") - and hasattr(self.trainer.lightning_module.pre_processor, "train_transform") - ): - return self.trainer.lightning_module.pre_processor.train_transform - if self.image_size: - return Resize(self.image_size, antialias=True) - return None - - @property - def eval_transform(self) -> Transform: - """Get the transform that will be passed to the val/test/predict datasets. - - If the eval_transform is not set, the engine will request the transform from the model. 
- """ - if self._eval_transform: - return self._eval_transform - if ( - getattr(self, "trainer", None) - and self.trainer.lightning_module - and hasattr(self.trainer.lightning_module, "pre_processor") - and hasattr(self.trainer.lightning_module.pre_processor, "test_transform") - ): - return self.trainer.lightning_module.pre_processor.test_transform - if self.image_size: - return Resize(self.image_size, antialias=True) - return None - @classmethod def from_config( cls: type["AnomalibDataModule"], diff --git a/src/anomalib/data/datamodules/depth/folder_3d.py b/src/anomalib/data/datamodules/depth/folder_3d.py index cebea42d02..32b33c07a3 100644 --- a/src/anomalib/data/datamodules/depth/folder_3d.py +++ b/src/anomalib/data/datamodules/depth/folder_3d.py @@ -51,14 +51,6 @@ class Folder3D(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -87,10 +79,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -101,10 +89,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -127,7 +111,6 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = Folder3DDataset( name=self.name, task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -143,7 +126,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = Folder3DDataset( name=self.name, task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/depth/mvtec_3d.py b/src/anomalib/data/datamodules/depth/mvtec_3d.py index 1e5b90e917..1182d71562 100644 --- a/src/anomalib/data/datamodules/depth/mvtec_3d.py +++ b/src/anomalib/data/datamodules/depth/mvtec_3d.py @@ -27,13 +27,7 @@ from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.mvtec_3d import MVTec3DDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - TestSplitMode, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils 
import DownloadInfo, Split, TestSplitMode, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -62,14 +56,6 @@ class MVTec3D(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -90,10 +76,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -104,10 +86,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -122,14 +100,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = MVTec3DDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTec3DDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/btech.py b/src/anomalib/data/datamodules/image/btech.py index 5abda6156e..51538f5351 100644 --- a/src/anomalib/data/datamodules/image/btech.py +++ b/src/anomalib/data/datamodules/image/btech.py @@ -20,13 +20,7 @@ from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.btech import BTechDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - TestSplitMode, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, TestSplitMode, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -53,14 +47,6 @@ class BTech(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. 
test_split_mode (TestSplitMode, optional): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float, optional): Fraction of images from the train set that will be reserved for testing. @@ -79,12 +65,9 @@ class BTech(AnomalibDataModule): >>> datamodule = BTech( ... root="./datasets/BTech", ... category="01", - ... image_size=256, ... train_batch_size=32, ... eval_batch_size=32, ... num_workers=8, - ... transform_config_train=None, - ... transform_config_eval=None, ... ) >>> datamodule.setup() @@ -121,10 +104,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -135,10 +114,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -153,14 +128,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = BTechDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = BTechDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/folder.py b/src/anomalib/data/datamodules/image/folder.py index 7941ba2f7b..94747a0566 100644 --- a/src/anomalib/data/datamodules/image/folder.py +++ b/src/anomalib/data/datamodules/image/folder.py @@ -47,14 +47,6 @@ class Folder(AnomalibDataModule): Defaults to ``8``. task (TaskType, optional): Task type. Could be ``classification``, ``detection`` or ``segmentation``. Defaults to ``segmentation``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -102,8 +94,6 @@ class Folder(AnomalibDataModule): abnormal_dir="crack", task=TaskType.SEGMENTATION, mask_dir=dataset_root / "mask" / "crack", - image_size=256, - normalization=InputNormalizationMethod.NONE, ) folder_datamodule.setup() @@ -136,10 +126,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.FROM_TEST, @@ -164,10 +150,6 @@ def __init__( test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, seed=seed, ) @@ -186,7 +168,6 @@ def _setup(self, _stage: str | None = None) -> None: self.train_data = FolderDataset( name=self.name, task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, normal_dir=self.normal_dir, @@ -199,7 +180,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = FolderDataset( name=self.name, task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, normal_dir=self.normal_dir, diff --git a/src/anomalib/data/datamodules/image/kolektor.py b/src/anomalib/data/datamodules/image/kolektor.py index 2f8dc3b92b..246c0a2fc5 100644 --- a/src/anomalib/data/datamodules/image/kolektor.py +++ b/src/anomalib/data/datamodules/image/kolektor.py @@ -25,13 +25,7 @@ from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.kolektor import KolektorDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - TestSplitMode, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, TestSplitMode, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -56,14 +50,6 @@ class Kolektor(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR`` test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -83,10 +69,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -97,10 +79,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -114,13 +92,11 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = KolektorDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, ) self.test_data = KolektorDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, ) diff --git a/src/anomalib/data/datamodules/image/mvtec.py b/src/anomalib/data/datamodules/image/mvtec.py index 508a582380..91f6287280 100644 --- a/src/anomalib/data/datamodules/image/mvtec.py +++ b/src/anomalib/data/datamodules/image/mvtec.py @@ -33,13 +33,7 @@ from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.mvtec import MVTecDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - TestSplitMode, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, TestSplitMode, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -68,14 +62,6 @@ class MVTec(AnomalibDataModule): Defaults to ``8``. task TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. @@ -103,10 +89,6 @@ class MVTec(AnomalibDataModule): >>> datamodule = MVTec(category="cable") - To change the image and batch size: - - >>> datamodule = MVTec(image_size=(512, 512), train_batch_size=16, eval_batch_size=8) - MVTec AD dataset does not provide a validation set. If you would like to use a separate validation set, you can use the ``val_split_mode`` and ``val_split_ratio`` arguments to create a validation set. 
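For reference, a minimal sketch of what the docstring example removed above looks like after this series: the input size moves from the datamodule to the model's pre-processor. The ``Patchcore`` usage and the ``pre_processor`` argument are taken from later patches in this series, so treat the exact calls as assumptions rather than part of this hunk.

    from anomalib.data import MVTec
    from anomalib.models import Patchcore

    # Batch sizes stay on the datamodule; image_size is no longer accepted here.
    datamodule = MVTec(category="cable", train_batch_size=16, eval_batch_size=8)

    # The input size is now configured on the model's pre-processor instead.
    pre_processor = Patchcore.configure_pre_processor(image_size=(512, 512))
    model = Patchcore(pre_processor=pre_processor)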
@@ -129,10 +111,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -142,10 +120,6 @@ def __init__( super().__init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, num_workers=num_workers, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, @@ -172,14 +146,12 @@ def _setup(self, _stage: str | None = None) -> None: """ self.train_data = MVTecDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.root, category=self.category, ) self.test_data = MVTecDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.root, category=self.category, diff --git a/src/anomalib/data/datamodules/image/visa.py b/src/anomalib/data/datamodules/image/visa.py index 30bf945c73..be34f5962d 100644 --- a/src/anomalib/data/datamodules/image/visa.py +++ b/src/anomalib/data/datamodules/image/visa.py @@ -33,13 +33,7 @@ from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.visa import VisaDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - TestSplitMode, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, TestSplitMode, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -66,14 +60,6 @@ class Visa(AnomalibDataModule): Defaults to ``8``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. test_split_mode (TestSplitMode): Setting that determines how the testing subset is obtained. Defaults to ``TestSplitMode.FROM_DIR``. test_split_ratio (float): Fraction of images from the train set that will be reserved for testing. 
@@ -94,10 +80,6 @@ def __init__( eval_batch_size: int = 32, num_workers: int = 8, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, test_split_mode: TestSplitMode | str = TestSplitMode.FROM_DIR, test_split_ratio: float = 0.2, val_split_mode: ValSplitMode | str = ValSplitMode.SAME_AS_TEST, @@ -108,10 +90,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, test_split_mode=test_split_mode, test_split_ratio=test_split_ratio, val_split_mode=val_split_mode, @@ -127,14 +105,12 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = VisaDataset( task=self.task, - transform=self.train_transform, split=Split.TRAIN, root=self.split_root, category=self.category, ) self.test_data = VisaDataset( task=self.task, - transform=self.eval_transform, split=Split.TEST, root=self.split_root, category=self.category, diff --git a/src/anomalib/data/datamodules/video/avenue.py b/src/anomalib/data/datamodules/video/avenue.py index 8914475081..589acf1584 100644 --- a/src/anomalib/data/datamodules/video/avenue.py +++ b/src/anomalib/data/datamodules/video/avenue.py @@ -27,12 +27,7 @@ from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame from anomalib.data.datasets.video.avenue import AvenueDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, ValSplitMode, download_and_extract logger = logging.getLogger(__name__) @@ -64,14 +59,6 @@ class Avenue(AnomalibVideoDataModule): Defaults to ``VideoTargetFrame.LAST``. task (TaskType): Task type, 'classification', 'detection' or 'segmentation' Defaults to ``TaskType.SEGMENTATION``. - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to ``32``. eval_batch_size (int, optional): Test batch size. 
@@ -141,10 +128,6 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame | str = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -156,10 +139,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -175,7 +154,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = AvenueDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -186,7 +164,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = AvenueDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datamodules/video/shanghaitech.py b/src/anomalib/data/datamodules/video/shanghaitech.py index b474f09547..bbad0871ef 100644 --- a/src/anomalib/data/datamodules/video/shanghaitech.py +++ b/src/anomalib/data/datamodules/video/shanghaitech.py @@ -26,12 +26,7 @@ from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame from anomalib.data.datasets.video.shanghaitech import ShanghaiTechDataset -from anomalib.data.utils import ( - DownloadInfo, - Split, - ValSplitMode, - download_and_extract, -) +from anomalib.data.utils import DownloadInfo, Split, ValSplitMode, download_and_extract from anomalib.data.utils.video import convert_video logger = logging.getLogger(__name__) @@ -53,14 +48,6 @@ class ShanghaiTech(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task TaskType): Task type, 'classification', 'detection' or 'segmentation' - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to 32. eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. 
@@ -77,10 +64,6 @@ def __init__( frames_between_clips: int = 1, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 32, eval_batch_size: int = 32, num_workers: int = 8, @@ -92,10 +75,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -112,7 +91,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = ShanghaiTechDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -123,7 +101,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = ShanghaiTechDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, diff --git a/src/anomalib/data/datamodules/video/ucsd_ped.py b/src/anomalib/data/datamodules/video/ucsd_ped.py index 2dd480ef37..312d2464ae 100644 --- a/src/anomalib/data/datamodules/video/ucsd_ped.py +++ b/src/anomalib/data/datamodules/video/ucsd_ped.py @@ -34,14 +34,6 @@ class UCSDped(AnomalibVideoDataModule): frames_between_clips (int, optional): Number of frames between each consecutive video clip. target_frame (VideoTargetFrame): Specifies the target frame in the video clip, used for ground truth retrieval task (TaskType): Task type, 'classification', 'detection' or 'segmentation' - image_size (tuple[int, int], optional): Size to which input images should be resized. - Defaults to ``None``. - transform (Transform, optional): Transforms that should be applied to the input images. - Defaults to ``None``. - train_transform (Transform, optional): Transforms that should be applied to the input images during training. - Defaults to ``None``. - eval_transform (Transform, optional): Transforms that should be applied to the input images during evaluation. - Defaults to ``None``. train_batch_size (int, optional): Training batch size. Defaults to 32. eval_batch_size (int, optional): Test batch size. Defaults to 32. num_workers (int, optional): Number of workers. Defaults to 8. 
@@ -58,10 +50,6 @@ def __init__( frames_between_clips: int = 10, target_frame: VideoTargetFrame = VideoTargetFrame.LAST, task: TaskType | str = TaskType.SEGMENTATION, - image_size: tuple[int, int] | None = None, - transform: Transform | None = None, - train_transform: Transform | None = None, - eval_transform: Transform | None = None, train_batch_size: int = 8, eval_batch_size: int = 8, num_workers: int = 8, @@ -73,10 +61,6 @@ def __init__( train_batch_size=train_batch_size, eval_batch_size=eval_batch_size, num_workers=num_workers, - image_size=image_size, - transform=transform, - train_transform=train_transform, - eval_transform=eval_transform, val_split_mode=val_split_mode, val_split_ratio=val_split_ratio, seed=seed, @@ -93,7 +77,6 @@ def __init__( def _setup(self, _stage: str | None = None) -> None: self.train_data = UCSDpedDataset( task=self.task, - transform=self.train_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, @@ -104,7 +87,6 @@ def _setup(self, _stage: str | None = None) -> None: self.test_data = UCSDpedDataset( task=self.task, - transform=self.eval_transform, clip_length_in_frames=self.clip_length_in_frames, frames_between_clips=self.frames_between_clips, target_frame=self.target_frame, From b9bb70064e0f37c05cf9845ba980754904f9db9d Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 13:30:04 +0000 Subject: [PATCH 42/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- src/anomalib/data/datamodules/base/image.py | 1 - src/anomalib/data/datamodules/depth/folder_3d.py | 2 -- src/anomalib/data/datamodules/depth/mvtec_3d.py | 2 -- src/anomalib/data/datamodules/image/btech.py | 1 - src/anomalib/data/datamodules/image/folder.py | 2 -- src/anomalib/data/datamodules/image/kolektor.py | 2 -- src/anomalib/data/datamodules/image/mvtec.py | 2 -- src/anomalib/data/datamodules/image/visa.py | 1 - src/anomalib/data/datamodules/video/avenue.py | 1 - src/anomalib/data/datamodules/video/shanghaitech.py | 2 -- src/anomalib/data/datamodules/video/ucsd_ped.py | 2 -- 11 files changed, 18 deletions(-) diff --git a/src/anomalib/data/datamodules/base/image.py b/src/anomalib/data/datamodules/base/image.py index f083de4ac9..8476bf5eeb 100644 --- a/src/anomalib/data/datamodules/base/image.py +++ b/src/anomalib/data/datamodules/base/image.py @@ -12,7 +12,6 @@ from lightning.pytorch.trainer.states import TrainerFn from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from torch.utils.data.dataloader import DataLoader -from torchvision.transforms.v2 import Resize, Transform from anomalib.data.utils import TestSplitMode, ValSplitMode, random_split, split_by_label from anomalib.data.utils.synthetic import SyntheticAnomalyDataset diff --git a/src/anomalib/data/datamodules/depth/folder_3d.py b/src/anomalib/data/datamodules/depth/folder_3d.py index 32b33c07a3..2e2930be26 100644 --- a/src/anomalib/data/datamodules/depth/folder_3d.py +++ b/src/anomalib/data/datamodules/depth/folder_3d.py @@ -8,8 +8,6 @@ from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.folder_3d import Folder3DDataset diff --git a/src/anomalib/data/datamodules/depth/mvtec_3d.py b/src/anomalib/data/datamodules/depth/mvtec_3d.py index 1182d71562..6a497ec952 100644 --- a/src/anomalib/data/datamodules/depth/mvtec_3d.py +++ 
b/src/anomalib/data/datamodules/depth/mvtec_3d.py @@ -22,8 +22,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.depth.mvtec_3d import MVTec3DDataset diff --git a/src/anomalib/data/datamodules/image/btech.py b/src/anomalib/data/datamodules/image/btech.py index 51538f5351..818c9d71b5 100644 --- a/src/anomalib/data/datamodules/image/btech.py +++ b/src/anomalib/data/datamodules/image/btech.py @@ -14,7 +14,6 @@ from pathlib import Path import cv2 -from torchvision.transforms.v2 import Transform from tqdm import tqdm from anomalib import TaskType diff --git a/src/anomalib/data/datamodules/image/folder.py b/src/anomalib/data/datamodules/image/folder.py index 94747a0566..7fe51c32a0 100644 --- a/src/anomalib/data/datamodules/image/folder.py +++ b/src/anomalib/data/datamodules/image/folder.py @@ -9,8 +9,6 @@ from collections.abc import Sequence from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.folder import FolderDataset diff --git a/src/anomalib/data/datamodules/image/kolektor.py b/src/anomalib/data/datamodules/image/kolektor.py index 246c0a2fc5..c962e4fba7 100644 --- a/src/anomalib/data/datamodules/image/kolektor.py +++ b/src/anomalib/data/datamodules/image/kolektor.py @@ -20,8 +20,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.kolektor import KolektorDataset diff --git a/src/anomalib/data/datamodules/image/mvtec.py b/src/anomalib/data/datamodules/image/mvtec.py index 91f6287280..a465ef52c1 100644 --- a/src/anomalib/data/datamodules/image/mvtec.py +++ b/src/anomalib/data/datamodules/image/mvtec.py @@ -28,8 +28,6 @@ import logging from pathlib import Path -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule from anomalib.data.datasets.image.mvtec import MVTecDataset diff --git a/src/anomalib/data/datamodules/image/visa.py b/src/anomalib/data/datamodules/image/visa.py index be34f5962d..a445349702 100644 --- a/src/anomalib/data/datamodules/image/visa.py +++ b/src/anomalib/data/datamodules/image/visa.py @@ -28,7 +28,6 @@ from pathlib import Path import cv2 -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.image import AnomalibDataModule diff --git a/src/anomalib/data/datamodules/video/avenue.py b/src/anomalib/data/datamodules/video/avenue.py index 589acf1584..86d068e761 100644 --- a/src/anomalib/data/datamodules/video/avenue.py +++ b/src/anomalib/data/datamodules/video/avenue.py @@ -21,7 +21,6 @@ import cv2 import scipy.io -from torchvision.transforms.v2 import Transform from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule diff --git a/src/anomalib/data/datamodules/video/shanghaitech.py b/src/anomalib/data/datamodules/video/shanghaitech.py index bbad0871ef..2b5c6f428c 100644 --- a/src/anomalib/data/datamodules/video/shanghaitech.py +++ b/src/anomalib/data/datamodules/video/shanghaitech.py @@ -20,8 +20,6 @@ from pathlib import Path from shutil import move -from torchvision.transforms.v2 import 
Transform - from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame diff --git a/src/anomalib/data/datamodules/video/ucsd_ped.py b/src/anomalib/data/datamodules/video/ucsd_ped.py index 312d2464ae..4743d17044 100644 --- a/src/anomalib/data/datamodules/video/ucsd_ped.py +++ b/src/anomalib/data/datamodules/video/ucsd_ped.py @@ -7,8 +7,6 @@ from pathlib import Path from shutil import move -from torchvision.transforms.v2 import Transform - from anomalib import TaskType from anomalib.data.datamodules.base.video import AnomalibVideoDataModule from anomalib.data.datasets.base.video import VideoTargetFrame From 185fec8ee696469cb4ddcdc7b8c5079c9282a04f Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 14:30:06 +0000 Subject: [PATCH 43/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 39 +++++++++++++++++-- src/anomalib/pre_processing/pre_processing.py | 12 ------ .../pre_processing/utils/transform.py | 18 --------- 3 files changed, 36 insertions(+), 33 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 54c21b5c02..64937b6213 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -198,11 +198,44 @@ def learning_type(self) -> LearningType: """Learning type of the model.""" raise NotImplementedError - def configure_pre_processor(self, image_size: tuple[int, int] | None = None) -> PreProcessor: # noqa: PLR6301 + @staticmethod + def configure_pre_processor( + image_size: tuple[int, int] | None = None, + **kwargs: Any, # noqa: ANN401, ARG004 + ) -> PreProcessor: """Configure the pre-processor. - The default pre-processor is resize to 256x256 and normalize to ImageNet stats. Individual models can override - this method to provide custom transforms and pre-processing pipelines. + The default pre-processor resizes images to 256x256 and normalizes using ImageNet statistics. + Individual models can override this method to provide custom transforms and pre-processing pipelines. + + Args: + image_size (tuple[int, int] | None, optional): Target size for resizing images. + If None, defaults to (256, 256). Defaults to None. + **kwargs (Any): Additional keyword arguments (unused). + + Returns: + PreProcessor: Configured pre-processor instance. + + Examples: + Get default pre-processor with custom image size: + + >>> preprocessor = AnomalyModule.configure_pre_processor(image_size=(512, 512)) + + Create model with custom pre-processor: + + >>> from torchvision.transforms.v2 import RandomHorizontalFlip + >>> custom_transform = Compose([ + ... Resize((256, 256), antialias=True), + ... CenterCrop((224, 224)), + ... RandomHorizontalFlip(p=0.5), + ... Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ... 
]) + >>> preprocessor.train_transform = custom_transform + >>> model = PatchCore(pre_processor=preprocessor) + + Disable pre-processing: + + >>> model = PatchCore(pre_processor=False) """ image_size = image_size or (256, 256) return PreProcessor( diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index dbee50f62e..d5d14cd2f0 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -16,7 +16,6 @@ from .utils.transform import ( get_dataloaders_transforms, - get_datamodule_transforms, get_exportable_transform, set_dataloaders_transforms, set_datamodule_transforms, @@ -124,17 +123,6 @@ def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: set_datamodule_transforms(datamodule, transforms) return - # Try to get transforms from datamodule - if datamodule: - datamodule_transforms = get_datamodule_transforms(datamodule) - if datamodule_transforms: - self.train_transform = datamodule_transforms.get("train") - self.val_transform = datamodule_transforms.get("val") - self.test_transform = datamodule_transforms.get("test") - self.predict_transform = self.test_transform - self.exportable_transform = get_exportable_transform(self.test_transform) - return - def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS") -> None: """Set up dataloader transforms.""" if isinstance(dataloaders, DataLoader): diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py index 09538d03e0..348fff1cbd 100644 --- a/src/anomalib/pre_processing/utils/transform.py +++ b/src/anomalib/pre_processing/utils/transform.py @@ -31,24 +31,6 @@ def get_stage_transform(stage: str, transforms: dict[str, Transform | None]) -> return stage_transforms_mapping.get(stage) -def get_datamodule_transforms(datamodule: AnomalibDataModule) -> dict[str, Transform] | None: - """Get transforms from datamodule if available. - - Args: - datamodule: The datamodule to get transforms from. - - Returns: - Dictionary of transforms if found in datamodule, None otherwise. - """ - if hasattr(datamodule, "train_transform") and hasattr(datamodule, "eval_transform"): - return { - "train": datamodule.train_transform, - "val": datamodule.eval_transform, - "test": datamodule.eval_transform, - } - return None - - def get_dataloaders_transforms(dataloaders: Sequence[DataLoader]) -> dict[str, Transform]: """Get transforms from dataloaders. From 06fd947f7efa6bdca069d57a8e825f7516802387 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 14:41:28 +0000 Subject: [PATCH 44/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 5 +++-- .../models/image/patchcore/lightning_model.py | 16 +++++++++++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 64937b6213..a77f23d219 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -198,10 +198,11 @@ def learning_type(self) -> LearningType: """Learning type of the model.""" raise NotImplementedError - @staticmethod + @classmethod def configure_pre_processor( + cls, image_size: tuple[int, int] | None = None, - **kwargs: Any, # noqa: ANN401, ARG004 + **kwargs: Any, # noqa: ANN401, ARG003 ) -> PreProcessor: """Configure the pre-processor. 
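The switch from ``@staticmethod`` to ``@classmethod`` above lets subclasses override this hook and still call it through the class, which is what the model-specific diffs below do. A minimal sketch of such an override in a hypothetical subclass; only the hook is shown, and the import paths are assumptions based on this series:

    from torchvision.transforms.v2 import Compose, Normalize, Resize

    from anomalib.models.components import AnomalyModule
    from anomalib.pre_processing import PreProcessor

    class MyModel(AnomalyModule):
        @classmethod
        def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor:
            # Hypothetical override: fixed default size plus ImageNet normalization.
            image_size = image_size or (320, 320)
            transform = Compose([
                Resize(image_size, antialias=True),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])
            return PreProcessor(transform=transform)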
diff --git a/src/anomalib/models/image/patchcore/lightning_model.py b/src/anomalib/models/image/patchcore/lightning_model.py index 6fdd2564ed..c7ba0e7446 100644 --- a/src/anomalib/models/image/patchcore/lightning_model.py +++ b/src/anomalib/models/image/patchcore/lightning_model.py @@ -64,13 +64,19 @@ def __init__( self.coreset_sampling_ratio = coreset_sampling_ratio self.embeddings: list[torch.Tensor] = [] - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + @classmethod + def configure_pre_processor( + cls, + image_size: tuple[int, int] | None = None, + center_crop_size: tuple[int, int] | None = None, + ) -> PreProcessor: """Default transform for Padim.""" image_size = image_size or (256, 256) - # scale center crop size proportional to image size - height, width = image_size - center_crop_size = (int(height * (224 / 256)), int(width * (224 / 256))) + if center_crop_size is None: + # scale center crop size proportional to image size + height, width = image_size + center_crop_size = (int(height * (224 / 256)), int(width * (224 / 256))) + transform = Compose([ Resize(image_size, antialias=True), CenterCrop(center_crop_size), From 079168edc8dcd31088a8196b64a0da77f4332708 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 14:44:57 +0000 Subject: [PATCH 45/59] Remove transforms from datamodules Signed-off-by: Samet Akcay --- src/anomalib/models/components/base/anomaly_module.py | 6 +----- src/anomalib/models/image/efficient_ad/lightning_model.py | 4 ++-- src/anomalib/models/image/uflow/lightning_model.py | 4 ++-- src/anomalib/models/image/winclip/lightning_model.py | 4 ++-- src/anomalib/models/video/ai_vad/lightning_model.py | 4 ++-- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index a77f23d219..e110f783d8 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -199,11 +199,7 @@ def learning_type(self) -> LearningType: raise NotImplementedError @classmethod - def configure_pre_processor( - cls, - image_size: tuple[int, int] | None = None, - **kwargs: Any, # noqa: ANN401, ARG003 - ) -> PreProcessor: + def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: """Configure the pre-processor. The default pre-processor resizes images to 256x256 and normalizes using ImageNet statistics. diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index e6a10d9195..de3035090d 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -208,8 +208,8 @@ def _get_quantiles_of_maps(self, maps: list[torch.Tensor]) -> tuple[torch.Tensor qb = torch.quantile(maps_flat, q=0.995).to(self.device) return qa, qb - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + @classmethod + def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: """Default transform for EfficientAd. 
Imagenet normalization applied in forward.""" image_size = image_size or (256, 256) transform = Compose([Resize(image_size, antialias=True)]) diff --git a/src/anomalib/models/image/uflow/lightning_model.py b/src/anomalib/models/image/uflow/lightning_model.py index b820a664b6..b696c7411f 100644 --- a/src/anomalib/models/image/uflow/lightning_model.py +++ b/src/anomalib/models/image/uflow/lightning_model.py @@ -78,8 +78,8 @@ def _setup(self) -> None: permute_soft=self.permute_soft, ) - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + @classmethod + def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: """Default pre-processor for UFlow.""" if image_size is not None: logger.warning("Image size is not used in UFlow. The input image size is determined by the model.") diff --git a/src/anomalib/models/image/winclip/lightning_model.py b/src/anomalib/models/image/winclip/lightning_model.py index 9e41c690af..e5c983b28d 100644 --- a/src/anomalib/models/image/winclip/lightning_model.py +++ b/src/anomalib/models/image/winclip/lightning_model.py @@ -175,8 +175,8 @@ def load_state_dict(self, state_dict: OrderedDict[str, Any], strict: bool = True state_dict.update(restore_dict) return super().load_state_dict(state_dict, strict) - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + @classmethod + def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: """Configure the default pre-processor used by the model.""" if image_size is not None: logger.warning("Image size is not used in WinCLIP. The input image size is determined by the model.") diff --git a/src/anomalib/models/video/ai_vad/lightning_model.py b/src/anomalib/models/video/ai_vad/lightning_model.py index 7d824ad9ba..a35ffddadf 100644 --- a/src/anomalib/models/video/ai_vad/lightning_model.py +++ b/src/anomalib/models/video/ai_vad/lightning_model.py @@ -168,8 +168,8 @@ def learning_type(self) -> LearningType: """ return LearningType.ONE_CLASS - @staticmethod - def configure_pre_processor(image_size: tuple[int, int] | None = None) -> PreProcessor: + @classmethod + def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: """Configure the pre-processor for AI-VAD. 
AI-VAD does not need a pre-processor or transforms, as the region- and From 1f6555ca73722647e90a8ba6251811771a14fa27 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 16:30:11 +0000 Subject: [PATCH 46/59] Remove transform related keys from data configs Signed-off-by: Samet Akcay --- configs/data/avenue.yaml | 3 --- configs/data/btech.yaml | 3 --- configs/data/folder.yaml | 3 --- configs/data/kolektor.yaml | 3 --- configs/data/mvtec.yaml | 3 --- configs/data/mvtec_3d.yaml | 3 --- configs/data/shanghaitech.yaml | 3 --- configs/data/ucsd_ped.yaml | 3 --- configs/data/visa.yaml | 3 --- 9 files changed, 27 deletions(-) diff --git a/configs/data/avenue.yaml b/configs/data/avenue.yaml index 396a9ba6b5..8fb07660ce 100644 --- a/configs/data/avenue.yaml +++ b/configs/data/avenue.yaml @@ -8,9 +8,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: from_test val_split_ratio: 0.5 seed: null diff --git a/configs/data/btech.yaml b/configs/data/btech.yaml index 22bfd0d8fe..9aa030540c 100644 --- a/configs/data/btech.yaml +++ b/configs/data/btech.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/folder.yaml b/configs/data/folder.yaml index 329fba6520..76be1382a7 100644 --- a/configs/data/folder.yaml +++ b/configs/data/folder.yaml @@ -12,9 +12,6 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/kolektor.yaml b/configs/data/kolektor.yaml index 1b2e6fe6b4..5daec435e4 100644 --- a/configs/data/kolektor.yaml +++ b/configs/data/kolektor.yaml @@ -4,9 +4,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/mvtec.yaml b/configs/data/mvtec.yaml index 7728808ece..5fb206e144 100644 --- a/configs/data/mvtec.yaml +++ b/configs/data/mvtec.yaml @@ -6,9 +6,6 @@ init_args: eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/mvtec_3d.yaml b/configs/data/mvtec_3d.yaml index d880f92f8f..f567f80899 100644 --- a/configs/data/mvtec_3d.yaml +++ b/configs/data/mvtec_3d.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test diff --git a/configs/data/shanghaitech.yaml b/configs/data/shanghaitech.yaml index be4da54311..d18e7671dc 100644 --- a/configs/data/shanghaitech.yaml +++ b/configs/data/shanghaitech.yaml @@ -8,9 +8,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/ucsd_ped.yaml b/configs/data/ucsd_ped.yaml index 009a5ef224..1226e4f149 100644 --- a/configs/data/ucsd_ped.yaml +++ b/configs/data/ucsd_ped.yaml @@ -8,9 +8,6 @@ init_args: 
train_batch_size: 8 eval_batch_size: 1 num_workers: 8 - transform: null - train_transform: null - eval_transform: null val_split_mode: FROM_TEST val_split_ratio: 0.5 seed: null diff --git a/configs/data/visa.yaml b/configs/data/visa.yaml index c5656a2158..0d94e82fa4 100644 --- a/configs/data/visa.yaml +++ b/configs/data/visa.yaml @@ -5,9 +5,6 @@ init_args: train_batch_size: 32 eval_batch_size: 32 num_workers: 8 - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test From 03196faa179af896b86e1c03024c80f5bfd00fce Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 17:31:19 +0000 Subject: [PATCH 47/59] update preprocessor tests Signed-off-by: Samet Akcay --- .../tools/upgrade/expected_draem_v1.yaml | 8 +---- .../pre_processing/test_pre_processing.py | 36 +++++++------------ tools/upgrade/config.py | 4 --- 3 files changed, 14 insertions(+), 34 deletions(-) diff --git a/tests/integration/tools/upgrade/expected_draem_v1.yaml b/tests/integration/tools/upgrade/expected_draem_v1.yaml index d4799adf98..e89f59beb8 100644 --- a/tests/integration/tools/upgrade/expected_draem_v1.yaml +++ b/tests/integration/tools/upgrade/expected_draem_v1.yaml @@ -3,16 +3,10 @@ data: init_args: root: ./datasets/MVTec category: bottle - image_size: - - 256 - - 256 train_batch_size: 72 eval_batch_size: 32 num_workers: 8 task: segmentation - transform: null - train_transform: null - eval_transform: null test_split_mode: from_dir test_split_ratio: 0.2 val_split_mode: same_as_test @@ -27,7 +21,7 @@ model: beta: - 0.1 - 1.0 - pre_processor: null + pre_processor: true normalization: normalization_method: min_max metrics: diff --git a/tests/unit/pre_processing/test_pre_processing.py b/tests/unit/pre_processing/test_pre_processing.py index 6cc2a3d3b9..b2b93a9021 100644 --- a/tests/unit/pre_processing/test_pre_processing.py +++ b/tests/unit/pre_processing/test_pre_processing.py @@ -59,17 +59,18 @@ def test_forward(self) -> None: def test_callbacks(self, method_name: str) -> None: """Test the callbacks of the PreProcessor class.""" pre_processor = PreProcessor(transform=self.common_transform) + trainer = MagicMock() + pl_module = MagicMock() method = getattr(pre_processor, method_name) - method(None, None, self.dummy_batch, 0) + method(trainer, pl_module, self.dummy_batch, 0) assert self.dummy_batch.image.shape == (1, 3, 224, 224) assert self.dummy_batch.gt_mask.shape == (1, 224, 224) def test_no_transform(self) -> None: """Test no transform.""" pre_processor = PreProcessor() - processed_batch = pre_processor(self.dummy_batch) - assert processed_batch.image.shape == (1, 3, 256, 256) - assert processed_batch.gt_mask.shape == (1, 256, 256) + processed_batch = pre_processor(self.dummy_batch.image) + assert processed_batch.shape == (1, 3, 256, 256) @staticmethod def test_different_stage_transforms() -> None: @@ -84,35 +85,24 @@ def test_different_stage_transforms() -> None: test_transform=test_transform, ) + trainer = MagicMock() + pl_module = MagicMock() + # Test train transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_train_batch_start(None, None, test_batch, 0) + pre_processor.on_train_batch_start(trainer, pl_module, test_batch, 0) assert test_batch.image.shape == (1, 3, 224, 224) # Test validation transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_validation_batch_start(None, None, 
test_batch, 0) + pre_processor.on_validation_batch_start(trainer, pl_module, test_batch, 0) assert test_batch.image.shape == (1, 3, 256, 256) # Test test transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_test_batch_start(None, None, test_batch, 0) + pre_processor.on_test_batch_start(trainer, pl_module, test_batch, 0) assert test_batch.image.shape == (1, 3, 288, 288) - def test_setup_transforms_from_datamodule(self) -> None: - """Test setup method when transforms are obtained from datamodule.""" - # Mock datamodule with transforms - datamodule = MagicMock() - datamodule.train_transform = self.common_transform - datamodule.eval_transform = self.common_transform - - pre_processor = PreProcessor() - pre_processor.setup_transforms(datamodule=datamodule) - - assert pre_processor.train_transform == self.common_transform - assert pre_processor.val_transform == self.common_transform - assert pre_processor.test_transform == self.common_transform - def test_setup_transforms_from_dataloaders(self) -> None: """Test setup method when transforms are obtained from dataloaders.""" # Mock dataloader with dataset having a transform @@ -120,7 +110,7 @@ def test_setup_transforms_from_dataloaders(self) -> None: dataloader.dataset.transform = self.common_transform pre_processor = PreProcessor() - pre_processor.setup_transforms(dataloaders=[dataloader]) + pre_processor.setup_dataloader_transforms(dataloaders=[dataloader]) assert pre_processor.train_transform == self.common_transform assert pre_processor.val_transform == self.common_transform @@ -141,7 +131,7 @@ def test_setup_transforms_priority(self) -> None: # Initialize PreProcessor with a custom transform pre_processor = PreProcessor(transform=self.common_transform) - pre_processor.setup_transforms(datamodule=datamodule, dataloaders=[dataloader]) + pre_processor.setup_datamodule_transforms(datamodule=datamodule) # Ensure PreProcessor's own transform is used assert pre_processor.train_transform == self.common_transform diff --git a/tools/upgrade/config.py b/tools/upgrade/config.py index 71bf17a4b5..5f1f3278e1 100644 --- a/tools/upgrade/config.py +++ b/tools/upgrade/config.py @@ -27,7 +27,6 @@ import yaml from anomalib.models import convert_snake_to_pascal_case -from anomalib.utils.config import to_tuple def get_class_signature(module_path: str, class_name: str) -> inspect.Signature: @@ -144,9 +143,6 @@ def upgrade_data_config(self) -> dict[str, Any]: self.old_config["dataset"], ) - # Input size is a list in the old config, convert it to a tuple - init_args["image_size"] = to_tuple(init_args["image_size"]) - return { "data": { "class_path": class_path, From d57931277cab865c864c8776e86ba6c5ac7f86cf Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 17:59:13 +0000 Subject: [PATCH 48/59] Remove setup method from the model implementations Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 10 ------- .../models/image/csflow/lightning_model.py | 14 ++++------ .../models/image/fastflow/lightning_model.py | 13 ++++----- .../models/image/ganomaly/lightning_model.py | 27 +++++++++---------- .../reverse_distillation/lightning_model.py | 13 ++++----- .../models/image/stfpm/lightning_model.py | 5 +--- .../models/image/uflow/lightning_model.py | 14 ++++------ 7 files changed, 34 insertions(+), 62 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 
e110f783d8..da7e85a89b 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -70,7 +70,6 @@ def __init__( self.post_processor = post_processor or self.default_post_processor() self._input_size: tuple[int, int] | None = None - self._is_setup = False # flag to track if setup has been called from the trainer @property @@ -94,15 +93,6 @@ def _setup(self) -> None: initialization. """ - def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None: - """Called when loading a checkpoint. - - This method is called to ensure that the `TorchModel` is built before - loading the state dict. - """ - del checkpoint # `checkpoint` variable is not used. - self.setup(stage="load_checkpoint") - def configure_callbacks(self) -> Sequence[Callback] | Callback: """Configure default callbacks for AnomalyModule.""" return [self.pre_processor] diff --git a/src/anomalib/models/image/csflow/lightning_model.py b/src/anomalib/models/image/csflow/lightning_model.py index fd145ef2e8..cdc99e76f3 100644 --- a/src/anomalib/models/image/csflow/lightning_model.py +++ b/src/anomalib/models/image/csflow/lightning_model.py @@ -49,20 +49,15 @@ def __init__( ) -> None: super().__init__(pre_processor=pre_processor) + if self.input_size is None: + msg = "CsFlow needs input size to build torch model." + raise ValueError(msg) + self.cross_conv_hidden_channels = cross_conv_hidden_channels self.n_coupling_blocks = n_coupling_blocks self.clamp = clamp self.num_channels = num_channels - self.loss = CsFlowLoss() - - self.model: CsFlowModel - - def _setup(self) -> None: - if self.input_size is None: - msg = "CsFlow needs input size to build torch model." - raise ValueError(msg) - self.model = CsFlowModel( input_size=self.input_size, cross_conv_hidden_channels=self.cross_conv_hidden_channels, @@ -71,6 +66,7 @@ def _setup(self) -> None: num_channels=self.num_channels, ) self.model.feature_extractor.eval() + self.loss = CsFlowLoss() def training_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: """Perform the training step of CS-Flow. diff --git a/src/anomalib/models/image/fastflow/lightning_model.py b/src/anomalib/models/image/fastflow/lightning_model.py index 9d25748551..74e2221288 100644 --- a/src/anomalib/models/image/fastflow/lightning_model.py +++ b/src/anomalib/models/image/fastflow/lightning_model.py @@ -51,20 +51,16 @@ def __init__( ) -> None: super().__init__(pre_processor=pre_processor) + if self.input_size is None: + msg = "Fastflow needs input size to build torch model." + raise ValueError(msg) + self.backbone = backbone self.pre_trained = pre_trained self.flow_steps = flow_steps self.conv3x3_only = conv3x3_only self.hidden_ratio = hidden_ratio - self.model: FastflowModel - self.loss = FastflowLoss() - - def _setup(self) -> None: - if self.input_size is None: - msg = "Fastflow needs input size to build torch model." - raise ValueError(msg) - self.model = FastflowModel( input_size=self.input_size, backbone=self.backbone, @@ -73,6 +69,7 @@ def _setup(self) -> None: conv3x3_only=self.conv3x3_only, hidden_ratio=self.hidden_ratio, ) + self.loss = FastflowLoss() def training_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: """Perform the training step input and return the loss. 
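With this commit the torch sub-model is instantiated directly in `__init__`, so models whose architecture depends on the input resolution (CS-Flow, FastFlow, GANomaly, Reverse Distillation, U-Flow) must know that resolution at construction time. It comes from the pre-processor's transform, as in the minimal sketch below; the explicit (256, 256) size is only an illustrative value, not an API requirement.

from anomalib.models import Fastflow

# A pre-processor whose resize transform defines the model's effective input size.
pre_processor = Fastflow.configure_pre_processor(image_size=(256, 256))

# The torch model is built eagerly in __init__; a missing input size now raises
# a ValueError immediately instead of being deferred to the removed _setup() hook.
model = Fastflow(pre_processor=pre_processor)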
diff --git a/src/anomalib/models/image/ganomaly/lightning_model.py b/src/anomalib/models/image/ganomaly/lightning_model.py index 04107e4e08..90e5c4f0e2 100644 --- a/src/anomalib/models/image/ganomaly/lightning_model.py +++ b/src/anomalib/models/image/ganomaly/lightning_model.py @@ -72,6 +72,10 @@ def __init__( ) -> None: super().__init__(pre_processor=pre_processor) + if self.input_size is None: + msg = "GANomaly needs input size to build torch model." + raise ValueError(msg) + self.n_features = n_features self.latent_vec_size = latent_vec_size self.extra_layers = extra_layers @@ -83,6 +87,15 @@ def __init__( self.min_scores: torch.Tensor = torch.tensor(float("inf"), dtype=torch.float32) # pylint: disable=not-callable self.max_scores: torch.Tensor = torch.tensor(float("-inf"), dtype=torch.float32) # pylint: disable=not-callable + self.model = GanomalyModel( + input_size=self.input_size, + num_input_channels=3, + n_features=self.n_features, + latent_vec_size=self.latent_vec_size, + extra_layers=self.extra_layers, + add_final_conv_layer=self.add_final_conv_layer, + ) + self.generator_loss = GeneratorLoss(wadv, wcon, wenc) self.discriminator_loss = DiscriminatorLoss() self.automatic_optimization = False @@ -95,20 +108,6 @@ def __init__( self.model: GanomalyModel - def _setup(self) -> None: - if self.input_size is None: - msg = "GANomaly needs input size to build torch model." - raise ValueError(msg) - - self.model = GanomalyModel( - input_size=self.input_size, - num_input_channels=3, - n_features=self.n_features, - latent_vec_size=self.latent_vec_size, - extra_layers=self.extra_layers, - add_final_conv_layer=self.add_final_conv_layer, - ) - def _reset_min_max(self) -> None: """Reset min_max scores.""" self.min_scores = torch.tensor(float("inf"), dtype=torch.float32) # pylint: disable=not-callable diff --git a/src/anomalib/models/image/reverse_distillation/lightning_model.py b/src/anomalib/models/image/reverse_distillation/lightning_model.py index 0899b8979f..0f4d43623e 100644 --- a/src/anomalib/models/image/reverse_distillation/lightning_model.py +++ b/src/anomalib/models/image/reverse_distillation/lightning_model.py @@ -49,19 +49,15 @@ def __init__( ) -> None: super().__init__(pre_processor=pre_processor) + if self.input_size is None: + msg = "Input size is required for Reverse Distillation model." + raise ValueError(msg) + self.backbone = backbone self.pre_trained = pre_trained self.layers = layers self.anomaly_map_mode = anomaly_map_mode - self.model: ReverseDistillationModel - self.loss = ReverseDistillationLoss() - - def _setup(self) -> None: - if self.input_size is None: - msg = "Input size is required for Reverse Distillation model." - raise ValueError(msg) - self.model = ReverseDistillationModel( backbone=self.backbone, pre_trained=self.pre_trained, @@ -69,6 +65,7 @@ def _setup(self) -> None: input_size=self.input_size, anomaly_map_mode=self.anomaly_map_mode, ) + self.loss = ReverseDistillationLoss() def configure_optimizers(self) -> optim.Adam: """Configure optimizers for decoder and bottleneck. 
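Because the sub-module and the loss are now created eagerly, they are available as soon as the Lightning module is constructed, which is what made the `on_load_checkpoint` and `_is_setup` bookkeeping removable. A short sketch of that guarantee, assuming the default constructor arguments of the model:

import torch

from anomalib.models import ReverseDistillation

pre_processor = ReverseDistillation.configure_pre_processor(image_size=(256, 256))
model = ReverseDistillation(pre_processor=pre_processor)

# Both attributes exist right after construction, so a checkpoint's state dict
# can be loaded without a prior setup pass or dummy forward.
assert isinstance(model.model, torch.nn.Module)
assert isinstance(model.loss, torch.nn.Module)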
diff --git a/src/anomalib/models/image/stfpm/lightning_model.py b/src/anomalib/models/image/stfpm/lightning_model.py index 5ef7427ed6..be7d5b9cb3 100644 --- a/src/anomalib/models/image/stfpm/lightning_model.py +++ b/src/anomalib/models/image/stfpm/lightning_model.py @@ -45,10 +45,7 @@ def __init__( ) -> None: super().__init__(pre_processor=pre_processor) - self.model = STFPMModel( - backbone=backbone, - layers=layers, - ) + self.model = STFPMModel(backbone=backbone, layers=layers) self.loss = STFPMLoss() def training_step(self, batch: Batch, *args, **kwargs) -> STEP_OUTPUT: diff --git a/src/anomalib/models/image/uflow/lightning_model.py b/src/anomalib/models/image/uflow/lightning_model.py index b696c7411f..dabac4cfb3 100644 --- a/src/anomalib/models/image/uflow/lightning_model.py +++ b/src/anomalib/models/image/uflow/lightning_model.py @@ -54,21 +54,16 @@ def __init__( """ super().__init__(pre_processor=pre_processor) + if self.input_size is None: + msg = "Input size is required for UFlow model." + raise ValueError(msg) + self.backbone = backbone self.flow_steps = flow_steps self.affine_clamp = affine_clamp self.affine_subnet_channels_ratio = affine_subnet_channels_ratio self.permute_soft = permute_soft - self.loss = UFlowLoss() - - self.model: UflowModel - - def _setup(self) -> None: - if self.input_size is None: - msg = "Input size is required for UFlow model." - raise ValueError(msg) - self.model = UflowModel( input_size=self.input_size, backbone=self.backbone, @@ -77,6 +72,7 @@ def _setup(self) -> None: affine_subnet_channels_ratio=self.affine_subnet_channels_ratio, permute_soft=self.permute_soft, ) + self.loss = UFlowLoss() @classmethod def configure_pre_processor(cls, image_size: tuple[int, int] | None = None) -> PreProcessor: From 5e82c34be7058323ff570fc83dbb28797fe3a237 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 29 Oct 2024 19:49:53 +0000 Subject: [PATCH 49/59] Remove image size from datamodules in jupyter notebooks Signed-off-by: Samet Akcay --- notebooks/100_datamodules/101_btech.ipynb | 5 ++--- notebooks/100_datamodules/102_mvtec.ipynb | 5 ++--- notebooks/100_datamodules/103_folder.ipynb | 5 ++--- notebooks/100_datamodules/104_tiling.ipynb | 2 +- notebooks/200_models/201_fastflow.ipynb | 5 ++--- notebooks/600_loggers/601_mlflow_logging.ipynb | 5 ++--- notebooks/700_metrics/701a_aupimo.ipynb | 3 +-- notebooks/700_metrics/701b_aupimo_advanced_i.ipynb | 1 - notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb | 1 - 9 files changed, 12 insertions(+), 20 deletions(-) diff --git a/notebooks/100_datamodules/101_btech.ipynb b/notebooks/100_datamodules/101_btech.ipynb index ef188665e6..2b87763ff0 100644 --- a/notebooks/100_datamodules/101_btech.ipynb +++ b/notebooks/100_datamodules/101_btech.ipynb @@ -48,7 +48,7 @@ "# NOTE: Provide the path to the dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"BTech\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"BTech\"" ] }, { @@ -106,7 +106,6 @@ "btech_datamodule = BTech(\n", " root=dataset_root,\n", " category=\"01\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", @@ -378,7 +377,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/100_datamodules/102_mvtec.ipynb b/notebooks/100_datamodules/102_mvtec.ipynb index 
4c274939d6..9081f256ae 100644 --- a/notebooks/100_datamodules/102_mvtec.ipynb +++ b/notebooks/100_datamodules/102_mvtec.ipynb @@ -58,7 +58,7 @@ "# NOTE: Provide the path to the dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"MVTec\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"MVTec\"" ] }, { @@ -84,7 +84,6 @@ "mvtec_datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", @@ -345,7 +344,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/100_datamodules/103_folder.ipynb b/notebooks/100_datamodules/103_folder.ipynb index 2f642e145a..cff6596890 100644 --- a/notebooks/100_datamodules/103_folder.ipynb +++ b/notebooks/100_datamodules/103_folder.ipynb @@ -42,7 +42,7 @@ "# NOTE: Provide the path to the dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"hazelnut_toy\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"hazelnut_toy\"" ] }, { @@ -102,7 +102,6 @@ " abnormal_dir=\"crack\",\n", " task=TaskType.SEGMENTATION,\n", " mask_dir=dataset_root / \"mask\" / \"crack\",\n", - " image_size=(256, 256),\n", ")\n", "folder_datamodule.setup()" ] @@ -369,7 +368,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/100_datamodules/104_tiling.ipynb b/notebooks/100_datamodules/104_tiling.ipynb index 949d6f1cf1..dd901c37e7 100644 --- a/notebooks/100_datamodules/104_tiling.ipynb +++ b/notebooks/100_datamodules/104_tiling.ipynb @@ -44,7 +44,7 @@ "# NOTE: Provide the path to the dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"MVTec\" / \"transistor\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"MVTec\" / \"transistor\"" ] }, { diff --git a/notebooks/200_models/201_fastflow.ipynb b/notebooks/200_models/201_fastflow.ipynb index 4cf8853fb3..bfad523ae0 100644 --- a/notebooks/200_models/201_fastflow.ipynb +++ b/notebooks/200_models/201_fastflow.ipynb @@ -44,7 +44,7 @@ "# NOTE: Provide the path to the dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"MVTec\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"MVTec\"" ] }, { @@ -120,7 +120,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=0,\n", @@ -555,7 +554,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" }, "orig_nbformat": 4, "vscode": { diff --git a/notebooks/600_loggers/601_mlflow_logging.ipynb b/notebooks/600_loggers/601_mlflow_logging.ipynb index f45a7a0e74..452c9de13f 100644 --- a/notebooks/600_loggers/601_mlflow_logging.ipynb +++ b/notebooks/600_loggers/601_mlflow_logging.ipynb @@ -135,7 +135,7 @@ "# NOTE: Provide the path to the 
dataset root directory.\n", "# If the datasets is not downloaded, it will be downloaded\n", "# to this directory.\n", - "dataset_root = Path.cwd().parent / \"datasets\" / \"MVTec\"" + "dataset_root = Path.cwd().parent.parent / \"datasets\" / \"MVTec\"" ] }, { @@ -197,7 +197,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"bottle\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=24,\n", @@ -421,7 +420,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/notebooks/700_metrics/701a_aupimo.ipynb b/notebooks/700_metrics/701a_aupimo.ipynb index 4fdb499ae7..69bdcffe2d 100644 --- a/notebooks/700_metrics/701a_aupimo.ipynb +++ b/notebooks/700_metrics/701a_aupimo.ipynb @@ -140,7 +140,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", @@ -532,7 +531,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.11.8" }, "orig_nbformat": 4 }, diff --git a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb index bf7f85f20c..5a6475426c 100644 --- a/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb +++ b/notebooks/700_metrics/701b_aupimo_advanced_i.ipynb @@ -164,7 +164,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", diff --git a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb index 5eec1b1c8d..e3eed3b1bb 100644 --- a/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb +++ b/notebooks/700_metrics/701c_aupimo_advanced_ii.ipynb @@ -158,7 +158,6 @@ "datamodule = MVTec(\n", " root=dataset_root,\n", " category=\"leather\",\n", - " image_size=256,\n", " train_batch_size=32,\n", " eval_batch_size=32,\n", " num_workers=8,\n", From a1a054842e0a02188f5be50c559fd53583c1ed61 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Wed, 30 Oct 2024 17:24:34 +0000 Subject: [PATCH 50/59] Modify folder notebook to acccess the batch from dataset not dataloader Signed-off-by: Samet Akcay --- notebooks/100_datamodules/103_folder.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/notebooks/100_datamodules/103_folder.ipynb b/notebooks/100_datamodules/103_folder.ipynb index cff6596890..328a069652 100644 --- a/notebooks/100_datamodules/103_folder.ipynb +++ b/notebooks/100_datamodules/103_folder.ipynb @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ @@ -63,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -91,7 +91,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": {}, "outputs": [], "source": [ @@ -113,7 +113,7 @@ "outputs": [], "source": [ "# Train images\n", - "i, data = next(enumerate(folder_datamodule.train_dataloader()))\n", + "data = next(iter(folder_datamodule.train_data))\n", "print(data.image.shape)" ] }, @@ -124,7 +124,7 @@ "outputs": [], "source": [ "# Test images\n", - "i, data = next(enumerate(folder_datamodule.test_dataloader()))\n", + "data = 
next(iter(folder_datamodule.test_data))\n", "print(data.image.shape, data.gt_mask.shape)" ] }, @@ -142,8 +142,8 @@ "metadata": {}, "outputs": [], "source": [ - "img = to_pil_image(data.image[0].clone())\n", - "msk = to_pil_image(data.gt_mask[0].int() * 255).convert(\"RGB\")\n", + "img = to_pil_image(data.image.clone())\n", + "msk = to_pil_image(data.gt_mask.int() * 255).convert(\"RGB\")\n", "\n", "Image.fromarray(np.hstack((np.array(img), np.array(msk))))" ] @@ -186,7 +186,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ From 0ab0a7116b4576cc614999fe3cd633569c29a66a Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Mon, 4 Nov 2024 18:06:12 +0000 Subject: [PATCH 51/59] Create resolve preprocessor method Signed-off-by: Samet Akcay --- .../models/components/base/anomaly_module.py | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index da7e85a89b..c008f7002a 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -55,18 +55,7 @@ def __init__( self.image_metrics: AnomalibMetricCollection self.pixel_metrics: AnomalibMetricCollection - # Handle pre-processor - # True -> use default pre-processor - # False -> no pre-processor - # PreProcessor -> use the provided pre-processor - if isinstance(pre_processor, PreProcessor): - self.pre_processor = pre_processor - elif isinstance(pre_processor, bool): - self.pre_processor = self.configure_pre_processor() - else: - msg = f"Invalid pre-processor type: {type(pre_processor)}" - raise TypeError(msg) - + self.pre_processor = self._resolve_pre_processor(pre_processor) self.post_processor = post_processor or self.default_post_processor() self._input_size: tuple[int, int] | None = None @@ -93,6 +82,25 @@ def _setup(self) -> None: initialization. """ + def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProcessor: + """Resolve and validate which pre-processor to use.. + + Args: + pre_processor: Pre-processor configuration + - True -> use default pre-processor + - False -> no pre-processor + - PreProcessor -> use the provided pre-processor + + Returns: + Configured pre-processor + """ + if isinstance(pre_processor, PreProcessor): + return pre_processor + if isinstance(pre_processor, bool): + return self.configure_pre_processor() + msg = f"Invalid pre-processor type: {type(pre_processor)}" + raise TypeError(msg) + def configure_callbacks(self) -> Sequence[Callback] | Callback: """Configure default callbacks for AnomalyModule.""" return [self.pre_processor] @@ -250,7 +258,7 @@ def input_size(self) -> tuple[int, int] | None: The effective input size is the size of the input tensor after the transform has been applied. If the transform is not set, or if the transform does not change the shape of the input tensor, this method will return None. 
""" - transform = self.pre_processor.train_transform + transform = self.pre_processor.predict_transform if transform is None: return None dummy_input = torch.zeros(1, 3, 1, 1) From 401fbaa15374aa2ea00c5c29da72ec0548296b45 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 14:21:08 +0000 Subject: [PATCH 52/59] Return if is Signed-off-by: Samet Akcay --- src/anomalib/models/components/base/anomaly_module.py | 8 ++++---- src/anomalib/models/image/efficient_ad/lightning_model.py | 2 +- src/anomalib/models/image/winclip/lightning_model.py | 5 ++++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index c008f7002a..6fbb9ae2ae 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -82,7 +82,7 @@ def _setup(self) -> None: initialization. """ - def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProcessor: + def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProcessor | None: """Resolve and validate which pre-processor to use.. Args: @@ -97,13 +97,13 @@ def _resolve_pre_processor(self, pre_processor: PreProcessor | bool) -> PreProce if isinstance(pre_processor, PreProcessor): return pre_processor if isinstance(pre_processor, bool): - return self.configure_pre_processor() + return self.configure_pre_processor() if pre_processor else None msg = f"Invalid pre-processor type: {type(pre_processor)}" raise TypeError(msg) def configure_callbacks(self) -> Sequence[Callback] | Callback: """Configure default callbacks for AnomalyModule.""" - return [self.pre_processor] + return [self.pre_processor] if self.pre_processor else [] def forward(self, batch: torch.Tensor, *args, **kwargs) -> InferenceBatch: """Perform the forward-pass by passing input tensor to the module. @@ -258,7 +258,7 @@ def input_size(self) -> tuple[int, int] | None: The effective input size is the size of the input tensor after the transform has been applied. If the transform is not set, or if the transform does not change the shape of the input tensor, this method will return None. """ - transform = self.pre_processor.predict_transform + transform = self.pre_processor.predict_transform if self.pre_processor else None if transform is None: return None dummy_input = torch.zeros(1, 3, 1, 1) diff --git a/src/anomalib/models/image/efficient_ad/lightning_model.py b/src/anomalib/models/image/efficient_ad/lightning_model.py index de3035090d..9d4bb64a24 100644 --- a/src/anomalib/models/image/efficient_ad/lightning_model.py +++ b/src/anomalib/models/image/efficient_ad/lightning_model.py @@ -256,7 +256,7 @@ def on_train_start(self) -> None: msg = "train_batch_size for EfficientAd should be 1." raise ValueError(msg) - if self.pre_processor.train_transform: + if self.pre_processor and self.pre_processor.train_transform: transforms = self.pre_processor.train_transform.transforms if transforms and any(isinstance(transform, Normalize) for transform in transforms): msg = "Transforms for EfficientAd should not contain Normalize." 
diff --git a/src/anomalib/models/image/winclip/lightning_model.py b/src/anomalib/models/image/winclip/lightning_model.py index e5c983b28d..587362aba1 100644 --- a/src/anomalib/models/image/winclip/lightning_model.py +++ b/src/anomalib/models/image/winclip/lightning_model.py @@ -79,7 +79,10 @@ def _setup(self) -> None: if self.k_shot: if self.few_shot_source: logger.info("Loading reference images from %s", self.few_shot_source) - reference_dataset = PredictDataset(self.few_shot_source, transform=self.pre_processor.test_transform) + reference_dataset = PredictDataset( + self.few_shot_source, + transform=self.pre_processor.test_transform if self.pre_processor else None, + ) dataloader = DataLoader(reference_dataset, batch_size=1, shuffle=False) else: logger.info("Collecting reference images from training dataset") From 9b45def6dbc9bc2a373b8f470ab7a482b77ae3f2 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 14:22:53 +0000 Subject: [PATCH 53/59] Rename self.exportable_transform to self.export_transform Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index d5d14cd2f0..df33e9c02f 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -109,7 +109,7 @@ def __init__( self.val_transform = val_transform or transform self.test_transform = test_transform or transform self.predict_transform = self.test_transform - self.exportable_transform = get_exportable_transform(self.test_transform) + self.export_transform = get_exportable_transform(self.test_transform) def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: """Set up datamodule transforms.""" @@ -146,7 +146,7 @@ def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DAT self.val_transform = dataloaders_transforms.get("val") self.test_transform = dataloaders_transforms.get("test") self.predict_transform = self.test_transform - self.exportable_transform = get_exportable_transform(self.test_transform) + self.export_transform = get_exportable_transform(self.test_transform) def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None: """Configure transforms at the start of each stage. @@ -173,7 +173,7 @@ def forward(self, batch: torch.Tensor) -> torch.Tensor: Within the Lightning training/validation/testing loops, the transforms are applied in the `on_*_batch_start` methods. 
""" - return self.exportable_transform(batch) if self.exportable_transform else batch + return self.export_transform(batch) if self.export_transform else batch def on_train_batch_start( self, From f5fbb7c78f4a733e56c9a652d73aaf96014590c4 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 14:32:52 +0000 Subject: [PATCH 54/59] Remove set_datamodule_transforms Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 9 +++++++-- src/anomalib/pre_processing/utils/transform.py | 13 ------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index df33e9c02f..1bd9b1ad57 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -17,8 +17,9 @@ from .utils.transform import ( get_dataloaders_transforms, get_exportable_transform, + get_stage_transform, set_dataloaders_transforms, - set_datamodule_transforms, + set_datamodule_stage_transform, ) if TYPE_CHECKING: @@ -120,7 +121,11 @@ def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: "val": self.val_transform, "test": self.test_transform, } - set_datamodule_transforms(datamodule, transforms) + + for stage in ["fit", "validate", "test", "predict"]: + transform = get_stage_transform(stage, transforms) + if transform is not None: + set_datamodule_stage_transform(datamodule, transform, stage) return def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS") -> None: diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py index 348fff1cbd..74c461b404 100644 --- a/src/anomalib/pre_processing/utils/transform.py +++ b/src/anomalib/pre_processing/utils/transform.py @@ -59,19 +59,6 @@ def get_dataloaders_transforms(dataloaders: Sequence[DataLoader]) -> dict[str, T return transforms -def set_datamodule_transforms(datamodule: AnomalibDataModule, transforms: dict[str, Transform | None]) -> None: - """Set transforms to a datamodule. - - Args: - datamodule: The datamodule to propagate transforms to. - transforms: Dictionary mapping stages to their transforms. - """ - for stage in ["fit", "validate", "test", "predict"]: - transform = get_stage_transform(stage, transforms) - if transform is not None: - set_datamodule_stage_transform(datamodule, transform, stage) - - def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: dict[str, Transform | None]) -> None: """Set transforms to dataloaders. From 9cc11d0a80dff7d0fb0caac943cc400148580cd0 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 14:42:32 +0000 Subject: [PATCH 55/59] remove hooks as they are not needed anymore Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 53 ------------------- 1 file changed, 53 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 1bd9b1ad57..6e0e505123 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -12,8 +12,6 @@ from torch.utils.data import DataLoader from torchvision.transforms.v2 import Transform -from anomalib.data.dataclasses.torch.base import Batch - from .utils.transform import ( get_dataloaders_transforms, get_exportable_transform, @@ -179,54 +177,3 @@ def forward(self, batch: torch.Tensor) -> torch.Tensor: in the `on_*_batch_start` methods. 
""" return self.export_transform(batch) if self.export_transform else batch - - def on_train_batch_start( - self, - trainer: Trainer, - pl_module: LightningModule, - batch: Batch, - batch_idx: int, - ) -> None: - """Apply transforms to the training batch.""" - del trainer, pl_module, batch_idx # Unused parameters - if self.train_transform: - batch.image, batch.gt_mask = self.train_transform(batch.image, batch.gt_mask) - - def on_validation_batch_start( - self, - trainer: Trainer, - pl_module: LightningModule, - batch: Batch, - batch_idx: int, - dataloader_idx: int = 0, - ) -> None: - """Apply transforms to the validation batch.""" - del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.val_transform: - batch.image, batch.gt_mask = self.val_transform(batch.image, batch.gt_mask) - - def on_test_batch_start( - self, - trainer: Trainer, - pl_module: LightningModule, - batch: Batch, - batch_idx: int, - dataloader_idx: int = 0, - ) -> None: - """Apply transforms to the test batch.""" - del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.test_transform: - batch.image, batch.gt_mask = self.test_transform(batch.image, batch.gt_mask) - - def on_predict_batch_start( - self, - trainer: Trainer, - pl_module: LightningModule, - batch: Batch, - batch_idx: int, - dataloader_idx: int = 0, - ) -> None: - """Apply transforms to the predict batch, which is the same as test batch.""" - del trainer, pl_module, batch_idx, dataloader_idx # Unused parameters - if self.predict_transform: - batch.image, batch.gt_mask = self.predict_transform(batch.image, batch.gt_mask) From 05c86dad00e183a4c1b74eb181268247a2b0ded6 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 15:20:03 +0000 Subject: [PATCH 56/59] Fix pre-processor tests Signed-off-by: Samet Akcay --- .../pre_processing/test_pre_processing.py | 34 ++++++------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/tests/unit/pre_processing/test_pre_processing.py b/tests/unit/pre_processing/test_pre_processing.py index b2b93a9021..36394d54a3 100644 --- a/tests/unit/pre_processing/test_pre_processing.py +++ b/tests/unit/pre_processing/test_pre_processing.py @@ -50,26 +50,14 @@ def test_forward(self) -> None: """Test the forward method of the PreProcessor class.""" pre_processor = PreProcessor(transform=self.common_transform) processed_batch = pre_processor(self.dummy_batch.image) + assert isinstance(processed_batch, torch.Tensor) assert processed_batch.shape == (1, 3, 224, 224) - @pytest.mark.parametrize( - "method_name", - ["on_train_batch_start", "on_validation_batch_start", "on_test_batch_start", "on_predict_batch_start"], - ) - def test_callbacks(self, method_name: str) -> None: - """Test the callbacks of the PreProcessor class.""" - pre_processor = PreProcessor(transform=self.common_transform) - trainer = MagicMock() - pl_module = MagicMock() - method = getattr(pre_processor, method_name) - method(trainer, pl_module, self.dummy_batch, 0) - assert self.dummy_batch.image.shape == (1, 3, 224, 224) - assert self.dummy_batch.gt_mask.shape == (1, 224, 224) - def test_no_transform(self) -> None: """Test no transform.""" pre_processor = PreProcessor() processed_batch = pre_processor(self.dummy_batch.image) + assert isinstance(processed_batch, torch.Tensor) assert processed_batch.shape == (1, 3, 256, 256) @staticmethod @@ -85,23 +73,23 @@ def test_different_stage_transforms() -> None: test_transform=test_transform, ) - trainer = MagicMock() - pl_module = MagicMock() - # Test train 
transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_train_batch_start(trainer, pl_module, test_batch, 0) - assert test_batch.image.shape == (1, 3, 224, 224) + processed_batch = pre_processor.train_transform(test_batch.image) + assert isinstance(processed_batch, torch.Tensor) + assert processed_batch.shape == (1, 3, 224, 224) # Test validation transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_validation_batch_start(trainer, pl_module, test_batch, 0) - assert test_batch.image.shape == (1, 3, 256, 256) + processed_batch = pre_processor.val_transform(test_batch.image) + assert isinstance(processed_batch, torch.Tensor) + assert processed_batch.shape == (1, 3, 256, 256) # Test test transform test_batch = ImageBatch(image=Image(torch.rand(3, 256, 256)), gt_mask=Mask(torch.zeros(256, 256))) - pre_processor.on_test_batch_start(trainer, pl_module, test_batch, 0) - assert test_batch.image.shape == (1, 3, 288, 288) + processed_batch = pre_processor.test_transform(test_batch.image) + assert isinstance(processed_batch, torch.Tensor) + assert processed_batch.shape == (1, 3, 288, 288) def test_setup_transforms_from_dataloaders(self) -> None: """Test setup method when transforms are obtained from dataloaders.""" From 3eecd89afb83d6ae09182cc2bf77df3fcd2c581e Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Tue, 5 Nov 2024 15:52:44 +0000 Subject: [PATCH 57/59] remove transform getter util function Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 6 +- .../pre_processing/utils/transform.py | 55 ++++++------------- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 6e0e505123..705d1503d7 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -15,7 +15,6 @@ from .utils.transform import ( get_dataloaders_transforms, get_exportable_transform, - get_stage_transform, set_dataloaders_transforms, set_datamodule_stage_transform, ) @@ -118,13 +117,12 @@ def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: "train": self.train_transform, "val": self.val_transform, "test": self.test_transform, + "predict": self.predict_transform, } - for stage in ["fit", "validate", "test", "predict"]: - transform = get_stage_transform(stage, transforms) + for stage, transform in transforms.items(): if transform is not None: set_datamodule_stage_transform(datamodule, transform, stage) - return def setup_dataloader_transforms(self, dataloaders: "EVAL_DATALOADERS | TRAIN_DATALOADERS") -> None: """Set up dataloader transforms.""" diff --git a/src/anomalib/pre_processing/utils/transform.py b/src/anomalib/pre_processing/utils/transform.py index 74c461b404..37eb1e9dd1 100644 --- a/src/anomalib/pre_processing/utils/transform.py +++ b/src/anomalib/pre_processing/utils/transform.py @@ -12,25 +12,6 @@ from anomalib.data.transforms import ExportableCenterCrop -def get_stage_transform(stage: str, transforms: dict[str, Transform | None]) -> Transform | None: - """Get the transform for a specific stage. - - Args: - stage: The stage to get the transform for (fit, validate, test, predict). - transforms: Dictionary mapping stage names to transforms. - - Returns: - Transform for the specified stage, or None if not found. 
- """ - stage_transforms_mapping = { - "fit": transforms.get("train"), - "validate": transforms.get("val"), - "test": transforms.get("test"), - "predict": transforms.get("test"), # predict uses test transform - } - return stage_transforms_mapping.get(stage) - - def get_dataloaders_transforms(dataloaders: Sequence[DataLoader]) -> dict[str, Transform]: """Get transforms from dataloaders. @@ -84,6 +65,24 @@ def set_dataloaders_transforms(dataloaders: Sequence[DataLoader], transforms: di set_dataloader_transform([loader], transform) +def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: + """Set a transform for a dataloader or list of dataloaders. + + Args: + dataloader: The dataloader(s) to set the transform for. + transform: The transform to set. + """ + if isinstance(dataloader, DataLoader): + if hasattr(dataloader.dataset, "transform"): + dataloader.dataset.transform = transform + elif isinstance(dataloader, Sequence): + for dl in dataloader: + set_dataloader_transform(dl, transform) + else: + msg = f"Unsupported dataloader type: {type(dataloader)}" + raise TypeError(msg) + + def set_datamodule_stage_transform(datamodule: AnomalibDataModule, transform: Transform, stage: str) -> None: """Set a transform for a specific stage in a AnomalibDataModule. @@ -113,24 +112,6 @@ def set_datamodule_stage_transform(datamodule: AnomalibDataModule, transform: Tr dataset.transform = transform -def set_dataloader_transform(dataloader: DataLoader | Sequence[DataLoader], transform: Transform) -> None: - """Set a transform for a dataloader or list of dataloaders. - - Args: - dataloader: The dataloader(s) to set the transform for. - transform: The transform to set. - """ - if isinstance(dataloader, DataLoader): - if hasattr(dataloader.dataset, "transform"): - dataloader.dataset.transform = transform - elif isinstance(dataloader, Sequence): - for dl in dataloader: - set_dataloader_transform(dl, transform) - else: - msg = f"Unsupported dataloader type: {type(dataloader)}" - raise TypeError(msg) - - def get_exportable_transform(transform: Transform | None) -> Transform | None: """Get exportable transform. 
From ad43f40389db898d93f9f8243f8872258d4e7593 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 7 Nov 2024 18:13:52 +0000 Subject: [PATCH 58/59] Fix transform dict to setup datamodule transforms Signed-off-by: Samet Akcay --- src/anomalib/pre_processing/pre_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/anomalib/pre_processing/pre_processing.py b/src/anomalib/pre_processing/pre_processing.py index 705d1503d7..27cffc7605 100644 --- a/src/anomalib/pre_processing/pre_processing.py +++ b/src/anomalib/pre_processing/pre_processing.py @@ -114,7 +114,7 @@ def setup_datamodule_transforms(self, datamodule: "AnomalibDataModule") -> None: # If PreProcessor has transforms, propagate them to datamodule if any([self.train_transform, self.val_transform, self.test_transform]): transforms = { - "train": self.train_transform, + "fit": self.train_transform, "val": self.val_transform, "test": self.test_transform, "predict": self.predict_transform, From b99717c6e7913d094b56c37ce7037c2c4da0fd58 Mon Sep 17 00:00:00 2001 From: Samet Akcay Date: Thu, 7 Nov 2024 21:00:00 +0000 Subject: [PATCH 59/59] Fix Fastflow notebook Signed-off-by: Samet Akcay --- notebooks/200_models/201_fastflow.ipynb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/notebooks/200_models/201_fastflow.ipynb b/notebooks/200_models/201_fastflow.ipynb index 7476f091d6..492655f010 100644 --- a/notebooks/200_models/201_fastflow.ipynb +++ b/notebooks/200_models/201_fastflow.ipynb @@ -318,7 +318,9 @@ }, "outputs": [], "source": [ - "inference_dataset = PredictDataset(path=dataset_root / \"bottle/test/broken_large/000.png\")\n", + "pre_processor = Fastflow.configure_pre_processor()\n", + "transform = pre_processor.predict_transform\n", + "inference_dataset = PredictDataset(path=dataset_root / \"bottle/test/broken_large/000.png\", transform=transform)\n", "inference_dataloader = DataLoader(dataset=inference_dataset, collate_fn=inference_dataset.collate_fn)" ] },
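Following the notebook fix above, inference reuses the model's own predict-time transform so that standalone images are preprocessed the same way as the training data. A hedged end-to-end sketch of that flow; the image path and checkpoint path are placeholders, and the import locations are the ones used elsewhere in the notebooks:

from torch.utils.data import DataLoader

from anomalib.data import PredictDataset
from anomalib.engine import Engine
from anomalib.models import Fastflow

pre_processor = Fastflow.configure_pre_processor()
model = Fastflow(pre_processor=pre_processor)

# Plug the predict-time transform into the standalone dataset.
inference_dataset = PredictDataset(
    path="datasets/MVTec/bottle/test/broken_large/000.png",  # placeholder image path
    transform=pre_processor.predict_transform,
)
inference_dataloader = DataLoader(dataset=inference_dataset, collate_fn=inference_dataset.collate_fn)

engine = Engine()
predictions = engine.predict(
    model=model,
    dataloaders=inference_dataloader,
    ckpt_path="results/fastflow/weights/model.ckpt",  # placeholder checkpoint path
)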