File size: 2,254 Bytes
b521671
 
 
 
 
 
 
 
 
 
 
 
 
 
dd65808
b521671
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import os

import gradio as gr
import PIL
import PIL.Image
from gradio import Label
from icevision.all import *
from icevision.models.checkpoint import *
from icevision.models.inference_sahi import IceSahiModel




# Load model
# Restore a VFNet (ResNet-50, multi-scale 2x, 640px, bs 8) checkpoint trained
# 2022-10-27. model_from_checkpoint returns a dict bundling the model itself
# plus the metadata (model_type, class_map, img_size) saved with it.
checkpoint_path = "vfnet_resnet50ms2x_640_bs8_maxbbox500_10272022.pth"
checkpoint_and_model = model_from_checkpoint(checkpoint_path)
model = checkpoint_and_model["model"]
model_type = checkpoint_and_model["model_type"]
class_map = checkpoint_and_model["class_map"]

# Transforms
# Validation-time pipeline: resize-and-pad to the training image size, then
# normalize — must match what the checkpoint was trained with.
img_size = checkpoint_and_model["img_size"]
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()])

# Populate examples in Gradio interface
# NOTE(review): assumes ./sqlot.jpg ships alongside this script — confirm it
# exists in the deployed Space, otherwise the example tile will be broken.
examples = [
    ['./sqlot.jpg'],
]
# Wrap the detector for SAHI sliced inference; detections under 0.4
# confidence are discarded.
sahimodel = IceSahiModel(model_type=model_type, model=model, class_map=class_map, tfms=valid_tfms, confidence_threshold=0.4)

def show_preds(input_image):
    """Run sliced (SAHI) inference on an uploaded image and return the annotated result.

    Args:
        input_image: RGB image as a ``numpy`` array (H x W x 3, uint8) — the
            format Gradio's ``"image"`` input component supplies.

    Returns:
        A ``PIL.Image`` with the predicted bounding boxes and class labels
        drawn on it (``pred_dict["img"]``).
    """
    # The explicit ``mode`` argument to Image.fromarray is deprecated in
    # recent Pillow releases; the mode is inferred from the uint8 HxWx3
    # array Gradio provides, so it is omitted here.
    img = PIL.Image.fromarray(input_image)

    # Slice the image into overlapping 512x512 tiles so small objects
    # (individual parking spaces) are detected reliably at full resolution,
    # then merge the per-tile detections back onto the original image.
    pred_dict = sahimodel.get_sliced_prediction(
        img,
        keep_sahi_format=False,
        return_img=True,  # ask for the annotated image, not just boxes
        slice_height=512,
        slice_width=512,
        overlap_height_ratio=0.2,
        overlap_width_ratio=0.2,
        display_label=True,
        display_bbox=True,
    )
    return pred_dict["img"]

# Build the Gradio UI: a single image input wired to show_preds, which
# renders the annotated detection result.
gr_interface = gr.Interface(
    fn=show_preds,
    inputs=["image"],
    # NOTE(review): gr.outputs.Image is the legacy (pre-4.x) Gradio output
    # API — confirm the pinned gradio version still exposes it; on modern
    # gradio this would be gr.Image(...).
    outputs=[gr.outputs.Image(type="pil", label="VFNET Inference with Sahi")],
    title="Spaces Empty or Not?",
    description="A VFNET model that detects whether parking spaces are empty or not. Upload an image or click an example image below to use.",
    examples=examples,
)
# debug=True makes launch() block and stream logs; share=False keeps the
# app local-only (no public tunnel); inline=False skips notebook embedding.
gr_interface.launch(inline=False, share=False, debug=True)