# Custom_ResNet / app.py
# Import the required modules/packages
import pandas as pd
import torch
from torch.nn import functional as F
import torchvision
from torchvision import transforms
import numpy as np
import gradio as gr
from PIL import Image
from collections import OrderedDict
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from model import custResNet
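# Note: custResNet (defined in model.py) is assumed to expose the sub-modules prep_block,
# block1, block2 and block3 that get_target_layer() below hands to GradCAM.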
# CIFAR-10 class-id to label mapping.
classes = {0: 'airplane',
           1: 'automobile',
           2: 'bird',
           3: 'cat',
           4: 'deer',
           5: 'dog',
           6: 'frog',
           7: 'horse',
           8: 'ship',
           9: 'truck'}
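# misclassified_images.csv is expected to hold one row per misclassified test image, with integer
# class ids in 'ground_truths' and 'predicted_vals'; the DataFrame index doubles as the file name
# of the matching image under misclassified_examples/ (see show_mis_classifications below).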
mis_classified_df = pd.read_csv('misclassified_images.csv')
mis_classified_df['ground_truths'] = mis_classified_df['ground_truths'].map(classes)
mis_classified_df['predicted_vals'] = mis_classified_df['predicted_vals'].map(classes)
mis_classified_df = mis_classified_df.sample(frac=1)  # shuffle so a different set of examples shows up each run
# Use the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model1 = custResNet()
model1.load_state_dict(torch.load('cust_resnet_model.pth', map_location=torch.device('cpu')), strict=False)
model1 = model1.to(device)
model1.eval()
# CIFAR-10 per-channel mean/std used during training.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.49139968, 0.48215827, 0.44653124], std=[0.24703233, 0.24348505, 0.26158768])
])
inv_transform = transforms.Normalize(mean=[-(0.49139968/0.24703233), -(0.48215827/0.24348505), -(0.44653124/0.26158768)],
                                     std=[(1/0.24703233), (1/0.24348505), (1/0.26158768)])
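# The inverse transform above follows from Normalize(mean=m, std=s) computing (x - m) / s per
# channel: the inverse map x -> x * s + m is itself a Normalize with mean = -m / s and std = 1 / s.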
def get_target_layer(target_layer):
    """Map a 1-based layer selector (1 = prep_block ... 4 = block3) onto a GradCAM target-layer list."""
    if target_layer == 4:
        result = [model1.block3]
    elif target_layer == 3:
        result = [model1.block2]
    elif target_layer == 2:
        result = [model1.block1]
    elif target_layer == 1:
        result = [model1.prep_block]
    else:
        result = [model1.block3]
    return result
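# Pre-build one GradCAM object per selectable layer so they can be reused across requests.
# The "-4" ... "-1" layer checkboxes below come back as indices 0 ... 3, so index 0 should point at
# prep_block and index 3 at block3 (hence range(1, 5) rather than range(4) in the list below).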
grad_cam_call_list = [GradCAM(model=model1, target_layers=get_target_layer(i), use_cuda=(device.type == 'cuda'))
                      for i in range(1, 5)]
def classify_image(input_image, top_classes=3, grad_cam=True, target_layers=[2, 3], transparency=0.7):
    """Classify an input image and, optionally, return GradCAM overlays for the selected layers."""
    input_ = transform(input_image).unsqueeze(0).to(device)
    with torch.no_grad():
        output = model1(input_)
    output = F.softmax(output.flatten(), dim=-1)
    confidences = [(classes[i], float(output[i])) for i in range(10)]
    confidences.sort(key=lambda x: x[1], reverse=True)
    confidences = OrderedDict(confidences[:top_classes])
    label = torch.argmax(output).item()
    results = []
    if grad_cam:
        for layer in target_layers:
            cam = grad_cam_call_list[layer]
            targets = [ClassifierOutputTarget(label)]
            grayscale_cam = cam(input_tensor=input_, targets=targets)
            grayscale_cam = grayscale_cam[0, :]
            # Blend the CAM heatmap with the original image (uint8 RGB, hence the /255).
            output_image = show_cam_on_image(input_image / 255, grayscale_cam, use_rgb=True, image_weight=transparency)
            results.append((output_image, f"Layer {layer - 4}"))
    else:
        results.append((input_image, "Input"))
    return results, confidences
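# A minimal sketch of calling classify_image outside Gradio (uses the bundled example image
# test_images/cat.jpg; the output file name is only illustrative):
#
#   img = np.asarray(Image.open('test_images/cat.jpg'))
#   overlays, top = classify_image(img, top_classes=3, grad_cam=True, target_layers=[3])
#   print(top)                                   # OrderedDict of the 3 most confident classes
#   Image.fromarray(overlays[0][0]).save('cam_overlay.jpg')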
demo1 = gr.Interface(
    fn=classify_image,
    inputs=[
        gr.Image(shape=(32, 32), label="Input Image", value='test_images/cat.jpg'),
        gr.Slider(1, 10, value=3, step=1, label="Number of Top Classes"),
        gr.Checkbox(label="Show GradCAM?", value=True),
        # gr.Slider(-4, -1, value=-2, step=1, label="Which Layer?"),
        gr.CheckboxGroup(["-4", "-3", "-2", "-1"], value=["-2", "-1"], label="Which Network Layer(s)?", type='index'),
        gr.Slider(0, 1, value=0.7, label="Transparency", step=0.1)
    ],
    outputs=[gr.Gallery(label="Output Images", columns=2, rows=2),
             gr.Label(label='Top Classes')],
    examples=[[f'test_images/{k}.jpg'] for k in classes.values()]
)
def show_mis_classifications(num_examples=20, grad_cam=True, target_layer=-2, transparency=0.5):
    """Collect up to num_examples misclassified images, optionally overlaid with GradCAM."""
    result = list()
    for index, row in mis_classified_df.iterrows():
        image = np.asarray(Image.open(f'misclassified_examples/{index}.jpg'))
        output_image, confidence = classify_image(image, top_classes=1, grad_cam=grad_cam,
                                                  target_layers=[4 + target_layer], transparency=transparency)
        truth = row['ground_truths']
        predicted = list(confidence)[0]  # the single most confident class name
        if truth != predicted:
            result.append((output_image[0][0], f"{row['ground_truths']} / {predicted}"))
        if len(result) >= num_examples:
            break
    return result
demo2 = gr.Interface(
    fn=show_mis_classifications,
    inputs=[
        gr.Number(value=20, minimum=1, maximum=len(mis_classified_df), label="No. of Misclassified Examples", precision=0),
        gr.Checkbox(label="Show GradCAM?", value=True),
        gr.Slider(-4, -1, value=-2, step=1, label="Which Layer?"),
        gr.Slider(0, 1, value=0.7, label="Transparency", step=0.1),
    ],
    outputs=[gr.Gallery(label="Misclassified Images (Truth / Predicted)", columns=4)]
)
demo = gr.TabbedInterface([demo1, demo2], ["Examples", "Misclassified Examples"])
demo.launch(debug=True)
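# To run locally: `python app.py`, then open the printed local URL (Gradio defaults to http://127.0.0.1:7860).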