import torch
import gradio as gr
from PIL import Image
from src.model import get_model, apply_weights, copy_weight  # local helper modules shipped with the Space
from src.transform import crop, pad, gpu_crop
from torchvision.transforms import Normalize, ToTensor
# The seven lesion classes predicted by the model, in training order.
vocab = [
    "Actinic Keratosis",
    "Basal Cell Carcinoma",
    "Benign Keratosis",
    "Dermatofibroma",
    "Melanoma",
    "Melanocytic Nevus",
    "Vascular Lesion",
]
# Rebuild the network and load the exported weights onto the CPU.
model = get_model()
state = torch.load("exported_model.pth", map_location="cpu")
apply_weights(model, state, copy_weight)

# ImageNet normalization statistics, matching the pretrained backbone.
to_tensor = ToTensor()
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def classify_image(inp):
    """Preprocess an input image array and return per-class confidences."""
    inp = Image.fromarray(inp)
    # Crop/pad to 460x460, convert to a batched tensor, then crop to the
    # 224x224 input size expected by the model and normalize.
    transformed_input = pad(crop(inp, (460, 460)), (460, 460))
    transformed_input = to_tensor(transformed_input).unsqueeze(0)
    transformed_input = gpu_crop(transformed_input, (224, 224))
    transformed_input = norm(transformed_input)
    model.eval()
    with torch.no_grad():
        pred = model(transformed_input)
        prob = torch.softmax(pred[0], dim=0)
    confidences = {vocab[i]: float(prob[i]) for i in range(len(vocab))}
    return confidences
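
# Quick local sanity check (a sketch; assumes one of the example images
# listed below is available next to this script):
#   import numpy as np
#   print(classify_image(np.array(Image.open("ISIC_0024634_00.jpg"))))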
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),
    outputs=gr.Label(),
    examples=[
        ["ISIC_0024634_00.jpg"],
        ["ISIC_0032932_00.jpg"],
    ],
    title="Skin Lesion Recognition using fast.ai",
    description="Adapted from https://domingomery.ing.puc.cl/",
    article="<p style='text-align: center'><a href='https://evertoncolombo.github.io/blog/posts/skin-lesion/Skin%20Lesion%20Recognition%20using%20fastai.html'>More info</a> | <a href='https://www.dropbox.com/s/nzrvuoos7sgl5dh/exp4val.zip'>Dataset</a> <center><img src='https://visitor-badge.glitch.me/badge?page_id=e_colombo_skin_lesion' alt='visitor badge'></center></p>",
    allow_flagging="never",
)
iface.launch()
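
# Note: inside a Hugging Face Space, launch() alone is enough to serve the app.
# For local experimentation you could instead call iface.launch(share=True)
# to get a temporary public link (optional; not needed on Spaces).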