Update app.py

app.py (CHANGED)
@@ -3,6 +3,7 @@ import cv2
import random
import numpy as np
import gradio as gr
+
try:
    from tensorflow.keras.models import Model
    from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input
@@ -11,7 +12,6 @@ except ImportError:
        from keras.models import Model
        from keras.applications.vgg19 import VGG19, preprocess_input
    except ImportError:
-        # Silently fail if Keras/TensorFlow is not installed, the UI will handle the error.
        pass

import matplotlib.pyplot as plt
@@ -19,61 +19,43 @@ from scipy.special import kl_div as scipy_kl_div
from skimage.metrics import structural_similarity as ssim
import warnings

-# --- Global Variables ---
TASK = "nodules"
PATH = os.path.join("datasets", TASK, "real")
images = []
-
perceptual_model = None
+
try:
-    # Initialize the VGG19 model for the perceptual loss metric.
    vgg = VGG19(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    vgg.trainable = False
    perceptual_model = Model(inputs=vgg.input, outputs=vgg.get_layer('block5_conv4').output, name="perceptual_model")
except Exception as e:
-    # This will be handled gracefully in the UI if the model fails to load.
    perceptual_model = None

-# --- Utility Functions ---
-
def safe_normalize_heatmap(heatmap):
-    """Safely normalizes a heatmap to a 0-255 range for visualization, handling non-finite values."""
    if heatmap is None or heatmap.size == 0:
        return np.zeros((64, 64), dtype=np.uint8)
-
    heatmap = heatmap.astype(np.float32)
-
-    # Replace non-finite values (NaN, inf) with numerical ones for safe processing.
    if not np.all(np.isfinite(heatmap)):
        min_val_safe = np.nanmin(heatmap[np.isfinite(heatmap)]) if np.any(np.isfinite(heatmap)) else 0
        max_val_safe = np.nanmax(heatmap[np.isfinite(heatmap)]) if np.any(np.isfinite(heatmap)) else 0
        heatmap = np.nan_to_num(heatmap, nan=0.0, posinf=max_val_safe, neginf=min_val_safe)
-
    min_val = np.min(heatmap)
    max_val = np.max(heatmap)
    range_val = max_val - min_val
-
-    # Normalize the heatmap to the 0-255 range.
    normalized_heatmap = np.zeros_like(heatmap, dtype=np.float32)
    if range_val > 1e-9:
        normalized_heatmap = ((heatmap - min_val) / range_val) * 255.0
-    normalized_heatmap = np.clip(normalized_heatmap, 0, 255)
+    normalized_heatmap = np.clip(normalized_heatmap, 0, 255)
    return np.uint8(normalized_heatmap)

-# --- Comparison Metric Functions ---
-
def KL_divergence(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False):
-    """Calculates the Kullback-Leibler Divergence between two images on a block-by-block basis."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape:
        return None
-
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    except cv2.error:
        return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = {
        "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)},
@@ -85,23 +67,18 @@ def KL_divergence(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=
    current_block_size = max(1, int(block_size))
    if current_block_size > min(height, width):
        current_block_size = min(height, width)
-
    for channel_idx, key in enumerate(channel_keys):
        channel_sum = 0.0
        for i in range(0, height - current_block_size + 1, current_block_size):
            for j in range(0, width - current_block_size + 1, current_block_size):
                block_gt = img_real_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx].flatten() + epsilon
                block_pred = img_fake_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx].flatten() + epsilon
-
-                # Normalize distributions within the block
                if np.sum(block_gt) > 0 and np.sum(block_pred) > 0:
                    block_gt_norm = block_gt / np.sum(block_gt)
                    block_pred_norm = block_pred / np.sum(block_pred)
-
                    kl_values = scipy_kl_div(block_gt_norm, block_pred_norm)
                    kl_values = np.nan_to_num(kl_values, nan=0.0, posinf=0.0, neginf=0.0)
                    kl_sum_block = np.sum(kl_values)
-
                    if np.isfinite(kl_sum_block):
                        channel_sum += kl_sum_block
                        mean_kl_block = kl_sum_block / max(1, current_block_size * current_block_size)
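For reference, a minimal standalone sketch (not part of the commit) of the per-block score the loop above accumulates; the helper name block_kl and the sample arrays are illustrative:

import numpy as np
from scipy.special import kl_div

def block_kl(block_gt, block_pred, epsilon=1e-10):
    # Shift by epsilon and normalize each block so it sums to 1 (a discrete distribution).
    p = block_gt.flatten() + epsilon
    q = block_pred.flatten() + epsilon
    p, q = p / p.sum(), q / q.sum()
    # kl_div returns the element-wise terms p*log(p/q) - p + q; summing them gives the KL divergence.
    return float(np.sum(kl_div(p, q)))

rng = np.random.default_rng(0)
a = rng.random((4, 4)).astype(np.float32)
print(block_kl(a, a))                                        # ~0.0 for identical blocks
print(block_kl(a, rng.random((4, 4)).astype(np.float32)))    # > 0 for differing blocks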
@@ -109,27 +86,22 @@ def KL_divergence(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=
                        if sum_channels:
                            img_dict["SUM"]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] += mean_kl_block
        img_dict[key]["SUM"] = channel_sum
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
-
    return img_dict

def L1_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False):
-    """Calculates the L1 (Mean Absolute Error) loss between two images."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape: return None
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    except cv2.error: return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = { "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "G": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "B": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "SUM": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)} }
    channel_keys = ["R", "G", "B"]
    current_block_size = max(1, int(block_size))
    if current_block_size > min(height, width): current_block_size = min(height, width)
-
    for channel_idx, key in enumerate(channel_keys):
        channel_sum = 0.0
        for i in range(0, height - current_block_size + 1, current_block_size):
@@ -143,26 +115,22 @@ def L1_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False)
                img_dict[key]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] = mean_l1_block
                if sum_channels: img_dict["SUM"]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] += mean_l1_block
        img_dict[key]["SUM"] = channel_sum
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
    return img_dict

def MSE_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False):
-    """Calculates the L2 (Mean Squared Error) loss between two images."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape: return None
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    except cv2.error: return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = { "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "G": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "B": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "SUM": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)} }
    channel_keys = ["R", "G", "B"]
    current_block_size = max(1, int(block_size))
    if current_block_size > min(height, width): current_block_size = min(height, width)
-
    for channel_idx, key in enumerate(channel_keys):
        channel_sum = 0.0
        for i in range(0, height - current_block_size + 1, current_block_size):
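The lines that actually compute mean_l1_block fall between the hunks shown above, so the following is only a sketch of what that per-block quantity presumably is (a mean absolute difference over normalized pixel values); block_l1 is an illustrative name, not from app.py:

import numpy as np

def block_l1(block_gt, block_pred):
    # Mean absolute difference over one block of normalized pixel values.
    return float(np.mean(np.abs(block_gt - block_pred)))

a = np.full((4, 4), 0.25, dtype=np.float32)
b = np.full((4, 4), 0.75, dtype=np.float32)
print(block_l1(a, b))  # 0.5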
@@ -176,29 +144,24 @@ def MSE_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False
                img_dict[key]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] = mean_mse_block
                if sum_channels: img_dict["SUM"]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] += mean_mse_block
        img_dict[key]["SUM"] = channel_sum
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
    return img_dict

def SSIM_loss(img_real, img_fake, block_size=7, sum_channels=False):
-    """Calculates the Structural Similarity Index Measure (SSIM) loss between two images."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape: return None
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB)
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB)
    except cv2.error: return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = { "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "G": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "B": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "SUM": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)} }
    channel_keys = ["R", "G", "B"]
-
    for channel_idx, key in enumerate(channel_keys):
        win_size = int(block_size)
        if win_size % 2 == 0: win_size += 1
        win_size = max(3, min(win_size, height, width))
-
        try:
            _, ssim_map = ssim(img_real_rgb[:, :, channel_idx], img_fake_rgb[:, :, channel_idx], win_size=win_size, data_range=255, full=True, gaussian_weights=True)
            ssim_loss_map = np.maximum(0.0, 1.0 - ssim_map)
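A minimal standalone sketch (not part of the commit) of the per-channel SSIM loss map that SSIM_loss builds above, using skimage's structural_similarity; the function name ssim_loss_map_for_channel and the uint8 test arrays are illustrative:

import numpy as np
from skimage.metrics import structural_similarity as ssim

def ssim_loss_map_for_channel(channel_real, channel_fake, block_size=7):
    # win_size must be odd and no larger than the smallest image side.
    win_size = int(block_size)
    if win_size % 2 == 0:
        win_size += 1
    win_size = max(3, min(win_size, *channel_real.shape))
    _, ssim_map = ssim(channel_real, channel_fake, win_size=win_size,
                       data_range=255, full=True, gaussian_weights=True)
    # Convert similarity (1.0 = identical) into a non-negative per-pixel loss.
    return np.maximum(0.0, 1.0 - ssim_map)

rng = np.random.default_rng(1)
real = rng.integers(0, 256, (64, 64), dtype=np.uint8)
fake = rng.integers(0, 256, (64, 64), dtype=np.uint8)
print(ssim_loss_map_for_channel(real, fake).mean())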
@@ -208,131 +171,101 @@ def SSIM_loss(img_real, img_fake, block_size=7, sum_channels=False):
        except ValueError:
            img_dict[key]["SUM"] = 0.0
            img_dict[key]["HEATMAP"] = np.zeros((height, width), dtype=np.float32)
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
    return img_dict

def cosine_similarity_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False):
-    """Calculates the Cosine Similarity loss between two images on a block-by-block basis."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape: return None
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    except cv2.error: return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = { "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "G": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "B": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "SUM": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)} }
    channel_keys = ["R", "G", "B"]
    current_block_size = max(1, int(block_size))
    if current_block_size > min(height, width): current_block_size = min(height, width)
-
    for channel_idx, key in enumerate(channel_keys):
        channel_sum = 0.0
        for i in range(0, height - current_block_size + 1, current_block_size):
            for j in range(0, width - current_block_size + 1, current_block_size):
                block_pred = img_fake_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx].flatten()
                block_gt = img_real_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx].flatten()
-
                dot_product = np.dot(block_pred, block_gt)
                norm_pred = np.linalg.norm(block_pred)
                norm_gt = np.linalg.norm(block_gt)
-
                cosine_sim = 0.0
                if norm_pred * norm_gt > epsilon:
                    cosine_sim = dot_product / (norm_pred * norm_gt)
                elif norm_pred < epsilon and norm_gt < epsilon:
-                    cosine_sim = 1.0
-
+                    cosine_sim = 1.0
                result_block = 1.0 - np.clip(cosine_sim, -1.0, 1.0)
                channel_sum += result_block
                img_dict[key]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] = result_block
                if sum_channels: img_dict["SUM"]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] += result_block
        img_dict[key]["SUM"] = channel_sum
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
    return img_dict

def TV_loss(img_real, img_fake, epsilon=1e-10, block_size=4, sum_channels=False):
-    """Calculates the Total Variation (TV) loss between two images."""
    if img_real is None or img_fake is None or img_real.shape != img_fake.shape: return None
    try:
        img_real_rgb = cv2.cvtColor(img_real, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
        img_fake_rgb = cv2.cvtColor(img_fake, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
    except cv2.error: return None
-
    height, width, channels = img_real_rgb.shape
    img_dict = { "R": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "G": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "B": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)}, "SUM": {"SUM": 0.0, "HEATMAP": np.zeros((height, width), dtype=np.float32)} }
    channel_keys = ["R", "G", "B"]
-    current_block_size = max(2, int(block_size))
+    current_block_size = max(2, int(block_size))
    if current_block_size > min(height, width): current_block_size = min(height, width)
-
    for channel_idx, key in enumerate(channel_keys):
        channel_sum = 0.0
        for i in range(0, height - current_block_size + 1, current_block_size):
            for j in range(0, width - current_block_size + 1, current_block_size):
                block_pred = img_fake_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx]
                block_gt = img_real_rgb[i:i+current_block_size, j:j+current_block_size, channel_idx]
-
                tv_pred = np.sum(np.abs(block_pred[:, 1:] - block_pred[:, :-1])) + np.sum(np.abs(block_pred[1:, :] - block_pred[:-1, :]))
                tv_gt = np.sum(np.abs(block_gt[:, 1:] - block_gt[:, :-1])) + np.sum(np.abs(block_gt[1:, :] - block_gt[:-1, :]))
                result_block = np.abs(tv_pred - tv_gt)
-
                channel_sum += result_block
                img_dict[key]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] = result_block
                if sum_channels: img_dict["SUM"]["HEATMAP"][i:i+current_block_size, j:j+current_block_size] += result_block
        img_dict[key]["SUM"] = channel_sum
-
    if sum_channels:
        img_dict["SUM"]["SUM"] = img_dict["R"]["SUM"] + img_dict["G"]["SUM"] + img_dict["B"]["SUM"]
        img_dict["SUM"]["HEATMAP"] /= max(1, channels)
    return img_dict

def perceptual_loss(img_real, img_fake, model, block_size=4):
-    """Calculates the perceptual loss using a pre-trained VGG19 model."""
    if img_real is None or img_fake is None or model is None or img_real.shape != img_fake.shape:
        return None
-
    original_height, original_width, _ = img_real.shape
    try:
-        # Determine the target input size from the model
        target_size = (model.input_shape[1], model.input_shape[2])
        cv2_target_size = (target_size[1], target_size[0])
-
-        # Resize, convert to RGB, and preprocess images for the model
        img_real_resized = cv2.resize(img_real, cv2_target_size, interpolation=cv2.INTER_AREA)
        img_fake_resized = cv2.resize(img_fake, cv2_target_size, interpolation=cv2.INTER_AREA)
        img_real_processed = preprocess_input(np.expand_dims(cv2.cvtColor(img_real_resized, cv2.COLOR_BGR2RGB), axis=0))
        img_fake_processed = preprocess_input(np.expand_dims(cv2.cvtColor(img_fake_resized, cv2.COLOR_BGR2RGB), axis=0))
    except Exception:
        return None
-
    try:
-        # Get feature maps from the model
        img_real_vgg = model.predict(img_real_processed)
        img_fake_vgg = model.predict(img_fake_processed)
    except Exception:
        return None
-
-    # Calculate MSE between feature maps
    feature_mse = np.square(img_real_vgg - img_fake_vgg)
    total_loss = np.sum(feature_mse)
    heatmap_features = np.mean(feature_mse[0, :, :, :], axis=-1)
-
-    # Resize heatmap back to original image dimensions
    heatmap_original_size = cv2.resize(heatmap_features, (original_width, original_height), interpolation=cv2.INTER_LINEAR)
-
    return {"SUM": {"SUM": total_loss, "HEATMAP": heatmap_original_size.astype(np.float32)}}

-# --- Gradio Core Logic ---
-
def gather_images(task):
-    """Loads a random pair of real and fake images from the selected task directory."""
    global TASK, PATH, images
-
    new_path = os.path.join("datasets", task, "real")
    if TASK != task or not images:
        PATH = new_path
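A usage sketch of the perceptual metric above, assuming it runs after the definitions in app.py; the file paths are illustrative and only assume the datasets/<task>/real and datasets/<task>/fake layout that gather_images uses:

import cv2

real = cv2.imread("datasets/nodules/real/example.png")   # illustrative path
fake = cv2.imread("datasets/nodules/fake/example.png")   # illustrative path
if real is not None and fake is not None and perceptual_model is not None:
    result = perceptual_loss(real, fake, perceptual_model)
    if result is not None:
        print("total VGG19 feature-space MSE:", result["SUM"]["SUM"])
        print("heatmap shape:", result["SUM"]["HEATMAP"].shape)  # resized back to the input image size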
@@ -353,31 +286,24 @@ def gather_images(task):
        error_msg = f"Error reading directory {PATH}: {e}"
        placeholder = np.zeros((256, 256, 3), dtype=np.uint8)
        return placeholder, placeholder, error_msg
-
    if not images:
        error_msg = f"Error: No images available for task '{task}'."
        placeholder = np.zeros((256, 256, 3), dtype=np.uint8)
        return placeholder, placeholder, error_msg
-
    try:
        real_img_path = random.choice(images)
        img_filename = os.path.basename(real_img_path)
        fake_img_path = os.path.join("datasets", task, "fake", img_filename)
-
        real_img = cv2.imread(real_img_path)
        fake_img = cv2.imread(fake_img_path)
-
        placeholder_shape = (256, 256, 3)
        if real_img is None:
            return np.zeros(placeholder_shape, dtype=np.uint8), fake_img if fake_img is not None else np.zeros(placeholder_shape, dtype=np.uint8), f"Error: Failed to load real image: {real_img_path}"
        if fake_img is None:
            return real_img, np.zeros(real_img.shape, dtype=np.uint8), f"Error: Failed to load fake image: {fake_img_path}"
-
-        # Ensure images have the same dimensions for comparison
        if real_img.shape != fake_img.shape:
            target_dims = (real_img.shape[1], real_img.shape[0])
            fake_img = cv2.resize(fake_img, target_dims, interpolation=cv2.INTER_AREA)
-
        return real_img, fake_img, f"Sample pair for '{task}' loaded successfully."
    except Exception as e:
        error_msg = f"An unexpected error occurred during image loading: {e}"
@@ -385,17 +311,18 @@ def gather_images(task):
        return placeholder, placeholder, error_msg

def run_comparison(real, fake, measurement, block_size_val):
-    """Runs the selected comparison and returns the heatmap and a status message."""
    placeholder_heatmap = np.zeros((64, 64, 3), dtype=np.uint8)
    if real is None or fake is None or not isinstance(real, np.ndarray) or not isinstance(fake, np.ndarray):
-        return placeholder_heatmap, "Error: Input image(s) missing or invalid. Please
-
+        return placeholder_heatmap, "Error: Input image(s) missing or invalid. Please load or upload a pair of images."
+
+    status_msg_prefix = ""
    if real.shape != fake.shape:
-
+        status_msg_prefix = f"Warning: Input images have different shapes ({real.shape} vs {fake.shape}). Resizing fake image to match real. "
+        target_dims = (real.shape[1], real.shape[0])
+        fake = cv2.resize(fake, target_dims, interpolation=cv2.INTER_AREA)

    result = None
    block_size_int = int(block_size_val)
-
    try:
        if measurement == "Kullback-Leibler Divergence": result = KL_divergence(real, fake, block_size=block_size_int, sum_channels=True)
        elif measurement == "L1-Loss": result = L1_loss(real, fake, block_size=block_size_int, sum_channels=True)
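The added shape-mismatch branch resizes the fake image to the real image's size; note that cv2.resize takes (width, height) while numpy image shapes are (height, width, channels). A standalone sketch with illustrative shapes, not part of the commit:

import numpy as np
import cv2

real = np.zeros((300, 200, 3), dtype=np.uint8)  # height=300, width=200
fake = np.zeros((256, 256, 3), dtype=np.uint8)
if real.shape != fake.shape:
    target_dims = (real.shape[1], real.shape[0])  # (width, height) = (200, 300)
    fake = cv2.resize(fake, target_dims, interpolation=cv2.INTER_AREA)
print(fake.shape)  # (300, 200, 3)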
@@ -411,79 +338,103 @@ def run_comparison(real, fake, measurement, block_size_val)
            return placeholder_heatmap, f"Error: Unknown measurement '{measurement}'."
    except Exception as e:
        return placeholder_heatmap, f"Error during {measurement} calculation: {e}"
-
    if result is None or "SUM" not in result or "HEATMAP" not in result["SUM"]:
        return placeholder_heatmap, f"{measurement} calculation failed or returned an invalid result structure."
-
    heatmap_raw = result["SUM"]["HEATMAP"]
    if not isinstance(heatmap_raw, np.ndarray) or heatmap_raw.size == 0:
        return placeholder_heatmap, f"Generated heatmap is invalid or empty for {measurement}."
-
    try:
        heatmap_normalized = safe_normalize_heatmap(heatmap_raw)
        heatmap_color = cv2.applyColorMap(heatmap_normalized, cv2.COLORMAP_HOT)
        heatmap_rgb = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)
    except Exception as e:
        return placeholder_heatmap, f"Error during heatmap coloring: {e}"
-
-    status_msg = f"{measurement} comparison successful."
+    status_msg = status_msg_prefix + f"{measurement} comparison successful."
    return heatmap_rgb, status_msg

-
+def clear_uploads(msg):
+    return None, None, msg

theme = gr.themes.Soft(primary_hue="blue", secondary_hue="orange")
-
-with gr.Blocks(theme=theme, css=".gradio-container { max-width: 1200px !important; margin: auto; }") as demo:
+with gr.Blocks(theme=theme, css=".gradio-container { max-width: 1400px !important; margin: auto; }") as demo:
    gr.Markdown("# GAN vs Ground Truth Image Comparison")
-    gr.Markdown("
-
-
-    status_message = gr.Textbox(label="Status / Errors", lines=2, interactive=False, show_copy_button=True, scale=1)
+    gr.Markdown("Compare images by loading a sample pair from a dataset or by uploading your own. Choose a comparison metric and run the analysis to see the difference heatmap.")
+
+    status_message = gr.Textbox(label="Status / Errors", lines=2, interactive=False, show_copy_button=True)

    with gr.Row(equal_height=False):
-        with gr.Column(scale=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-
-
-            )
+        with gr.Column(scale=1, min_width=300):
+            gr.Markdown("### 1. Get Images")
+            with gr.Tabs():
+                with gr.TabItem("Load from Dataset"):
+                    task_dropdown = gr.Dropdown(
+                        ["nodules", "facades"], value=TASK,
+                        info="Select the dataset task.",
+                        label="Dataset Task"
+                    )
+                    sample_button = gr.Button("π Get New Sample Pair", variant="secondary")
+                with gr.TabItem("Upload Images"):
+                    gr.Markdown("Upload your own images to compare.")
+                    upload_real_img = gr.Image(type="numpy", label="Upload Real/Reference Image")
+                    upload_fake_img = gr.Image(type="numpy", label="Upload Fake/Comparison Image")
+
+        with gr.Column(scale=2, min_width=600):
+            gr.Markdown("### 2. View Images & Run Comparison")
+            with gr.Row():
+                real_img_display = gr.Image(type="numpy", label="Real Image (Ground Truth)", height=350, interactive=False)
+                fake_img_display = gr.Image(type="numpy", label="Fake Image (Generated by GAN)", height=350, interactive=False)
+
+            with gr.Row():
+                measurement_dropdown = gr.Dropdown(
+                    ["Kullback-Leibler Divergence", "L1-Loss", "MSE", "SSIM", "Cosine Similarity", "TV", "Perceptual"],
+                    value="Kullback-Leibler Divergence",
+                    info="Select the comparison metric.",
+                    label="Comparison Metric",
+                    scale=2
+                )
+                block_size_slider = gr.Slider(
+                    minimum=2, maximum=64, value=8, step=2,
+                    info="Size of the block/window for comparison.",
+                    label="Block/Window Size",
+                    scale=1
+                )
            run_button = gr.Button("π Run Comparison", variant="primary")

-        with gr.Column(scale=
+        with gr.Column(scale=1, min_width=300):
+            gr.Markdown("### 3. See Result")
            heatmap_display = gr.Image(type="numpy", label="Comparison Heatmap (Difference)", height=350, interactive=False)
-
-    # --- Event Handlers ---
-
-    # When the "Get New Sample Pair" button is clicked
+
    sample_button.click(
        fn=gather_images,
        inputs=[task_dropdown],
        outputs=[real_img_display, fake_img_display, status_message]
    )
+
+    upload_real_img.upload(
+        fn=lambda x: x,
+        inputs=[upload_real_img],
+        outputs=[real_img_display]
+    )
+
+    upload_fake_img.upload(
+        fn=lambda x: x,
+        inputs=[upload_fake_img],
+        outputs=[fake_img_display]
+    )

-    # When the "Run Comparison" button is clicked
    run_button.click(
        fn=run_comparison,
        inputs=[real_img_display, fake_img_display, measurement_dropdown, block_size_slider],
-        outputs=[heatmap_display, status_message]
+        outputs=[heatmap_display, status_message]
+    )
+
+    task_dropdown.change(
+        fn=clear_uploads,
+        inputs=[gr.Textbox(value="Task changed. Please get a new sample.", visible=False)],
+        outputs=[real_img_display, fake_img_display, status_message]
    )

+
if __name__ == "__main__":
    print("-------------------------------------------------------------")
    print("Verifying VGG19 model status...")
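A minimal standalone Gradio sketch (not part of the commit; component names are illustrative, not the ones in app.py) of the event wiring this hunk adds: an uploaded image is mirrored into a read-only display, and changing the dropdown clears both displays through a clear_uploads-style callback.

import gradio as gr

def clear_displays(msg):
    # Same return shape as clear_uploads above: wipe two image components, show a message.
    return None, None, msg

with gr.Blocks() as sketch_demo:
    task = gr.Dropdown(["nodules", "facades"], value="nodules", label="Dataset Task")
    upload = gr.Image(type="numpy", label="Upload Image")
    shown_a = gr.Image(type="numpy", label="Display A", interactive=False)
    shown_b = gr.Image(type="numpy", label="Display B", interactive=False)
    status = gr.Textbox(label="Status", interactive=False)

    # Copy the uploaded array straight into the display component.
    upload.upload(fn=lambda x: x, inputs=[upload], outputs=[shown_a])
    # Reset both displays and post a fixed message when the task changes.
    task.change(fn=lambda: clear_displays("Task changed. Please get a new sample."),
                inputs=None, outputs=[shown_a, shown_b, status])

# sketch_demo.launch() would start this standalone demo.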
@@ -502,5 +453,4 @@ if __name__ == "__main__":
    print("Launching Gradio App...")
    print("Access the app in your browser, usually at: http://127.0.0.1:7860")
    print("-------------------------------------------------------------")
-
    demo.launch(share=False, debug=False)