# Copyright (C) 2025 NVIDIA Corporation. All rights reserved.
#
# This work is licensed under the LICENSE file
# located at the root directory.
import cv2
import numpy as np
from PIL import Image, ImageDraw
import torch
import matplotlib.pyplot as plt
from skimage import filters
from IPython.display import display
def gaussian_blur(heatmap, kernel_size=7):
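    """Blur a (H, W) heatmap tensor with an OpenCV Gaussian kernel and return the result as a CPU tensor."""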
    # Shape of heatmap: (H, W)
    heatmap = heatmap.cpu().numpy()
    heatmap = cv2.GaussianBlur(heatmap, (kernel_size, kernel_size), 0)
    heatmap = torch.tensor(heatmap)
    return heatmap
def show_cam_on_image(img, mask):
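    """Overlay a [0, 1] mask on a [0, 1] image using the JET colormap and return the normalized blend."""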
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    cam = heatmap + np.float32(img)
    cam = cam / np.max(cam)
    return cam
def show_image_and_heatmap(heatmap: torch.Tensor, image: Image.Image, relevance_res: int = 256, interpolation: str = 'bilinear', gaussian_kernel_size: int = 3):
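    """Upsample a (square) relevance heatmap to relevance_res, overlay it on the resized image, and return the result as a PIL Image."""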
    image = image.resize((relevance_res, relevance_res))
    image = np.array(image)
    image = (image - image.min()) / (image.max() - image.min())
    # Apply gaussian blur to heatmap
    # heatmap = gaussian_blur(heatmap, kernel_size=gaussian_kernel_size)
    # heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    # otsu_thr = filters.threshold_otsu(heatmap.cpu().numpy())
    # heatmap = (heatmap > otsu_thr).to(heatmap.dtype)
    heatmap = heatmap.reshape(1, 1, heatmap.shape[-1], heatmap.shape[-1])
    heatmap = torch.nn.functional.interpolate(heatmap, size=relevance_res, mode=interpolation)
    heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    heatmap = heatmap.reshape(relevance_res, relevance_res).cpu()
    vis = show_cam_on_image(image, heatmap)
    vis = np.uint8(255 * vis)
    vis = cv2.cvtColor(np.array(vis), cv2.COLOR_RGB2BGR)
    vis = vis.astype(np.uint8)
    vis = Image.fromarray(vis).resize((relevance_res, relevance_res))
    return vis
def show_only_heatmap(heatmap: torch.Tensor, relevance_res: int = 256, interpolation: str = 'bilinear', gaussian_kernel_size: int = 3):
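    """Upsample a (square) relevance heatmap to relevance_res and return it alone as a grayscale PIL Image."""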
    # Apply gaussian blur to heatmap
    # heatmap = gaussian_blur(heatmap, kernel_size=gaussian_kernel_size)
    heatmap = heatmap.reshape(1, 1, heatmap.shape[-1], heatmap.shape[-1])
    heatmap = torch.nn.functional.interpolate(heatmap, size=relevance_res, mode=interpolation)
    heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min())
    heatmap = heatmap.reshape(relevance_res, relevance_res).cpu()
    vis = heatmap
    vis = np.uint8(255 * vis)
    # Show in black and white
    vis = cv2.cvtColor(np.array(vis), cv2.COLOR_GRAY2BGR)
    vis = Image.fromarray(vis).resize((relevance_res, relevance_res))
    return vis
def visualize_tokens_attentions(attention, tokens, image, heatmap_interpolation="nearest", show_on_image=True):
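    """Render one attention map per token (overlaid on the image or standalone) in a grid and return the matplotlib figure."""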
    # tokens: list of strings
    # attention: tensor of shape (num_tokens, height, width); attention[j] is the map for tokens[j]
    token_vis = []
    for j, token in enumerate(tokens):
        if j >= attention.shape[0]:
            break
        if show_on_image:
            vis = show_image_and_heatmap(attention[j], image, relevance_res=512, interpolation=heatmap_interpolation)
        else:
            vis = show_only_heatmap(attention[j], relevance_res=512, interpolation=heatmap_interpolation)
        token_vis.append((token, vis))
    # Display the token and the attention map in a grid, with K tokens per row
    K = 4
    n_rows = (len(token_vis) + K - 1) // K  # Ceiling division
    fig, axs = plt.subplots(n_rows, K, figsize=(K * 5, n_rows * 5))
    for i, (token, vis) in enumerate(token_vis):
        row, col = divmod(i, K)
        if n_rows > 1:
            ax = axs[row, col]
        elif K > 1:
            ax = axs[col]
        else:
            ax = axs
        ax.imshow(vis)
        ax.set_title(token)
        ax.axis("off")
    # Hide unused subplots
    for j in range(i + 1, n_rows * K):
        row, col = divmod(j, K)
        if n_rows > 1:
            axs[row, col].axis('off')
        elif K > 1:
            axs[col].axis('off')
    plt.tight_layout()
    # We want to return the figure so that we can save it to a file
    return fig
def show_images(images, titles=None, size=1024, max_row_length=5, figsize=None, col_height=10, save_path=None):
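    """Display one or more PIL images in a grid with up to max_row_length images per row, optionally titled and saved to save_path."""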
    if isinstance(images, Image.Image):
        images = [images]
    if len(images) == 1:
        img = images[0]
        img = img.resize((size, size))
        plt.imshow(img)
        plt.axis('off')
        if titles is not None:
            plt.title(titles[0])
        if save_path:
            plt.savefig(save_path, bbox_inches='tight', dpi=150)
        plt.show()
    else:
        images = [img.resize((size, size)) for img in images]
        # Check if the number of titles matches the number of images
        if titles is not None:
            assert len(images) == len(titles), "Number of titles should match the number of images"
        n_images = len(images)
        n_cols = min(n_images, max_row_length)
        n_rows = (n_images + n_cols - 1) // n_cols  # Calculate the number of rows needed
        if figsize is None:
            figsize = (n_cols * col_height, n_rows * col_height)
        fig, axs = plt.subplots(n_rows, n_cols, figsize=figsize)
        axs = axs.flatten() if isinstance(axs, np.ndarray) else [axs]
        # Display images in the subplots
        for i, img in enumerate(images):
            axs[i].imshow(img)
            if titles is not None:
                axs[i].set_title(titles[i])
            axs[i].axis("off")
        # Turn off any unused subplots
        for ax in axs[len(images):]:
            ax.axis("off")
        if save_path:
            plt.savefig(save_path, bbox_inches='tight', dpi=150)
        plt.show()
def show_tensors(tensors, titles=None, size=None, max_row_length=5):
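    """Display a list of 2D tensors as images in a grid, optionally resizing each to (size, size) with bilinear interpolation first."""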
    # Shape of tensors: List[Tensor[H, W]]
    if size is not None:
        tensors = [torch.nn.functional.interpolate(t.unsqueeze(0).unsqueeze(0), size=(size, size), mode='bilinear').squeeze() for t in tensors]
    if len(tensors) == 1:
        plt.imshow(tensors[0].cpu().numpy())
        plt.axis('off')
        if titles is not None:
            plt.title(titles[0])
        plt.show()
    else:
        # Check if the number of titles matches the number of images
        if titles is not None:
            assert len(tensors) == len(titles), "Number of titles should match the number of images"
        n_tensors = len(tensors)
        n_cols = min(n_tensors, max_row_length)
        n_rows = (n_tensors + n_cols - 1) // n_cols
        fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 10, n_rows * 10))
        axs = axs.flatten() if isinstance(axs, np.ndarray) else [axs]
        for i, tensor in enumerate(tensors):
            axs[i].imshow(tensor.cpu().numpy())
            if titles is not None:
                axs[i].set_title(titles[i])
            axs[i].axis("off")
        for ax in axs[len(tensors):]:
            ax.axis("off")
        plt.show()
def draw_bboxes_on_image(image, bboxes, color="red", thickness=2):
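    """Return a copy of a PIL image with a rectangle drawn for each bbox given as (x0, y0, x1, y1) pixel coordinates."""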
    image = image.copy()
    draw = ImageDraw.Draw(image)
    for bbox in bboxes:
        draw.rectangle(bbox, outline=color, width=thickness)
    return image
def draw_points_on_pil_image(pil_image, point_coords, point_color="red", radius=5):
"""
Draw points (circles) on a PIL image and return the modified image.
:param pil_image: PIL Image (e.g., sam_masked_image)
:param point_coords: An array-like of shape (N, 2), with x,y coordinates
:param point_color: Color of the point (default 'red')
:param radius: Radius of the drawn circles
:return: PIL Image with points drawn
"""
# Copy so we don't modify the original
out_img = pil_image.copy()
draw = ImageDraw.Draw(out_img)
# Draw each point
for x, y in point_coords:
# Calculate bounding box of the circle
left_up_point = (x - radius, y - radius)
right_down_point = (x + radius, y + radius)
# Draw the circle
draw.ellipse([left_up_point, right_down_point], fill=point_color, outline=point_color)
return out_img |
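

# Minimal, self-contained usage sketch with synthetic inputs (a random image,
# a hand-picked box and points, and a random 16x16 "attention" map). The file
# name and all values below are illustrative only; replace them with real
# model outputs in practice.
if __name__ == "__main__":
    demo_image = Image.fromarray(np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8))

    # Draw a box and two points on the synthetic image
    boxed = draw_bboxes_on_image(demo_image, [(32, 32, 128, 128)])
    dotted = draw_points_on_pil_image(boxed, [(64, 64), (96, 96)])

    # Overlay a random 16x16 heatmap on the image
    demo_heatmap = torch.rand(16, 16)
    overlay = show_image_and_heatmap(demo_heatmap, demo_image, relevance_res=256)

    # Show both results side by side and save the figure
    show_images([dotted, overlay], titles=["boxes + points", "heatmap overlay"], size=256, save_path="visualization_demo.png")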