"""Anomaly Map Generator for the PaDiM model implementation."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import torch
import torch.nn.functional as F
from omegaconf import ListConfig
from torch import Tensor, nn
from anomalib.models.components import GaussianBlur2d
class AnomalyMapGenerator(nn.Module):
"""Generate Anomaly Heatmap.
Args:
image_size (ListConfig, tuple): Size of the input image. The anomaly map is upsampled to this dimension.
sigma (int, optional): Standard deviation for Gaussian Kernel. Defaults to 4.
"""
def __init__(self, image_size: ListConfig | tuple, sigma: int = 4) -> None:
super().__init__()
self.image_size = image_size if isinstance(image_size, tuple) else tuple(image_size)
kernel_size = 2 * int(4.0 * sigma + 0.5) + 1
self.blur = GaussianBlur2d(kernel_size=(kernel_size, kernel_size), sigma=(sigma, sigma), channels=1)

    @staticmethod
    def compute_distance(embedding: Tensor, stats: list[Tensor]) -> Tensor:
        """Compute the anomaly score of the patch at position (i, j) of a test image.

        Ref: Equation (2), Section III-C of the paper.

        Args:
            embedding (Tensor): Embedding vector.
            stats (list[Tensor]): Mean and inverse covariance matrix of the multivariate Gaussian distribution.

        Returns:
            Anomaly score of a test image via Mahalanobis distance.
        """
        batch, channel, height, width = embedding.shape
        embedding = embedding.reshape(batch, channel, height * width)

        # calculate mahalanobis distances
        mean, inv_covariance = stats
        delta = (embedding - mean).permute(2, 0, 1)

        distances = (torch.matmul(delta, inv_covariance) * delta).sum(2).permute(1, 0)
        distances = distances.reshape(batch, 1, height, width)
        distances = distances.clamp(0).sqrt()

        return distances
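
    # Shape sketch (illustrative; these layouts are inferred from the broadcasting in
    # compute_distance, not quoted from the PaDiM paper):
    #   embedding:      (B, C, H, W)    reshaped to (B, C, H*W)
    #   mean:           (C, H*W)        broadcast over the batch dimension
    #   inv_covariance: (H*W, C, C)     one inverse covariance matrix per patch position
    #   delta:          (H*W, B, C)     after the permute
    #   distances:      (B, 1, H, W)    after the final reshape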

    def up_sample(self, distance: Tensor) -> Tensor:
        """Up sample the anomaly score to match the input image size.

        Args:
            distance (Tensor): Anomaly score computed via the Mahalanobis distance.

        Returns:
            Resized distance matrix matching the input image size.
        """
        score_map = F.interpolate(
            distance,
            size=self.image_size,
            mode="bilinear",
            align_corners=False,
        )
        return score_map

    def smooth_anomaly_map(self, anomaly_map: Tensor) -> Tensor:
        """Apply Gaussian smoothing to the anomaly map.

        Args:
            anomaly_map (Tensor): Anomaly score for the test image(s).

        Returns:
            Filtered anomaly scores.
        """
        blurred_anomaly_map = self.blur(anomaly_map)
        return blurred_anomaly_map

    def compute_anomaly_map(self, embedding: Tensor, mean: Tensor, inv_covariance: Tensor) -> Tensor:
        """Compute anomaly score.

        Scores are calculated based on the embedding vector, mean and inverse covariance of the multivariate
        Gaussian distribution.

        Args:
            embedding (Tensor): Embedding vector extracted from the test set.
            mean (Tensor): Mean of the multivariate Gaussian distribution.
            inv_covariance (Tensor): Inverse covariance matrix of the multivariate Gaussian distribution.

        Returns:
            Output anomaly score.
        """
        score_map = self.compute_distance(
            embedding=embedding,
            stats=[mean.to(embedding.device), inv_covariance.to(embedding.device)],
        )
        up_sampled_score_map = self.up_sample(score_map)
        smoothed_anomaly_map = self.smooth_anomaly_map(up_sampled_score_map)
        return smoothed_anomaly_map
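
    # Pipeline sketch (illustrative shapes, assuming a (H, W) feature grid and an
    # image_size of (image_H, image_W)):
    #   compute_distance   -> (B, 1, H, W)             patch-level Mahalanobis distances
    #   up_sample          -> (B, 1, image_H, image_W) bilinear upsampling to image size
    #   smooth_anomaly_map -> (B, 1, image_H, image_W) Gaussian blur, spatial size unchanged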

    def forward(self, **kwargs) -> Tensor:
        """Returns anomaly_map.

        Expects `embedding`, `mean` and `inv_covariance` keywords to be passed explicitly.

        Example:
            >>> anomaly_map_generator = AnomalyMapGenerator(image_size=input_size)
            >>> output = anomaly_map_generator(embedding=embedding, mean=mean, inv_covariance=inv_covariance)

        Raises:
            ValueError: If `embedding`, `mean` or `inv_covariance` keys are not found.

        Returns:
            torch.Tensor: anomaly map
        """
        if not ("embedding" in kwargs and "mean" in kwargs and "inv_covariance" in kwargs):
            raise ValueError(f"Expected keys `embedding`, `mean` and `inv_covariance`. Found {kwargs.keys()}")

        embedding: Tensor = kwargs["embedding"]
        mean: Tensor = kwargs["mean"]
        inv_covariance: Tensor = kwargs["inv_covariance"]

        return self.compute_anomaly_map(embedding, mean, inv_covariance)
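

# Minimal usage sketch with random tensors. The sizes below (batch 2, 100 channels,
# a 28x28 feature grid, a 224x224 image) are illustrative assumptions, following the
# layout implied by compute_distance: mean of shape (C, H*W) and one inverse covariance
# matrix per patch position, shape (H*W, C, C).
if __name__ == "__main__":
    batch_size, channels, feat_h, feat_w = 2, 100, 28, 28
    image_size = (224, 224)

    embedding = torch.rand(batch_size, channels, feat_h, feat_w)
    mean = torch.zeros(channels, feat_h * feat_w)
    # An identity inverse covariance per patch reduces the score to a plain Euclidean distance.
    inv_covariance = torch.eye(channels).repeat(feat_h * feat_w, 1, 1)

    generator = AnomalyMapGenerator(image_size=image_size)
    anomaly_map = generator(embedding=embedding, mean=mean, inv_covariance=inv_covariance)
    print(anomaly_map.shape)  # expected: torch.Size([2, 1, 224, 224])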