"""PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization.
Paper https://arxiv.org/abs/2011.08785
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
from typing import List, Union

import torch
from omegaconf import DictConfig, ListConfig
from torch import Tensor

from anomalib.models.components import AnomalyModule
from anomalib.models.padim.torch_model import PadimModel

logger = logging.getLogger(__name__)

__all__ = ["PadimLightning"]


class PadimLightning(AnomalyModule):
"""PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization.
Args:
hparams (Union[DictConfig, ListConfig]): Model params
"""
def __init__(self, hparams: Union[DictConfig, ListConfig]):
super().__init__(hparams)
logger.info("Initializing Padim Lightning model.")
self.layers = hparams.model.layers
self.model: PadimModel = PadimModel(
layers=hparams.model.layers,
input_size=hparams.model.input_size,
tile_size=hparams.dataset.tiling.tile_size,
tile_stride=hparams.dataset.tiling.stride,
apply_tiling=hparams.dataset.tiling.apply,
backbone=hparams.model.backbone,
).eval()
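        # Buffers filled during training: `embeddings` accumulates the per-batch
        # feature embeddings (moved to CPU), and `stats` later receives the
        # parameters of the Gaussian fitted over those embeddings.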
self.stats: List[Tensor] = []
        self.embeddings: List[Tensor] = []

@staticmethod
def configure_optimizers(): # pylint: disable=arguments-differ
"""PADIM doesn't require optimization, therefore returns no optimizers."""
return None
    def training_step(self, batch, _batch_idx):  # pylint: disable=arguments-differ
        """Training Step of PaDiM. For each batch, hierarchical features are extracted from the CNN.

        Args:
            batch (Dict[str, Any]): Batch containing image filename, image, label and mask
            _batch_idx: Index of the batch.

        Returns:
            None. The extracted hierarchical feature map is stored in ``self.embeddings``.
        """
self.model.feature_extractor.eval()
embedding = self.model(batch["image"])
        # NOTE: `self.embeddings` accumulates the embedding of each batch to build
        # up the training-set embedding. We append these values manually because of
        # the new hook order introduced in PL v1.4.0:
        # https://github.com/PyTorchLightning/pytorch-lightning/pull/7357
        self.embeddings.append(embedding.cpu())

def on_validation_start(self) -> None:
"""Fit a Gaussian to the embedding collected from the training set."""
        # NOTE: Previous anomalib versions fit the Gaussian at the end of the epoch.
        # This is no longer possible with PyTorch Lightning v1.4.0, since validation
        # is now run within the training epoch.
logger.info("Aggregating the embedding extracted from the training set.")
embeddings = torch.vstack(self.embeddings)
logger.info("Fitting a Gaussian to the embedding collected from the training set.")
        self.stats = self.model.gaussian.fit(embeddings)

    def validation_step(self, batch, _):  # pylint: disable=arguments-differ
        """Validation Step of PaDiM.

        Similar to the training step, hierarchical features are extracted from the CNN for each batch.

        Args:
            batch: Input batch
            _: Index of the batch.

        Returns:
            Dictionary containing images, anomaly maps, true labels and masks.
            These are required in `validation_epoch_end` for feature concatenation.
        """
batch["anomaly_maps"] = self.model(batch["image"])
return batch
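
# Example usage (a minimal sketch, not part of the original module): the config keys
# below mirror exactly what this class reads from `hparams`. Real anomalib configs
# contain additional keys consumed by `AnomalyModule` (e.g. thresholding settings),
# so treat this as illustrative rather than a complete, runnable recipe.
#
#   from omegaconf import OmegaConf
#
#   config = OmegaConf.create(
#       {
#           "model": {
#               "layers": ["layer1", "layer2", "layer3"],
#               "input_size": [256, 256],
#               "backbone": "resnet18",
#           },
#           "dataset": {
#               "tiling": {"apply": False, "tile_size": None, "stride": None},
#           },
#       }
#   )
#   model = PadimLightning(hparams=config)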