qwertyforce commited on
Commit
e214012
·
verified ·
1 Parent(s): f4b18dc

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -0
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import gradio as gr
from PIL import Image
from torchvision import transforms
from statistics import mean

# Disable Pillow's decompression-bomb safety limit so arbitrarily large
# uploads can be opened without raising DecompressionBombError.
# NOTE(review): this trusts user-supplied images completely — confirm that
# unbounded image sizes are acceptable for this deployment.
Image.MAX_IMAGE_PIXELS = None
7
+
8
def read_img_file(f):
    """Open *f* with Pillow and return it as an RGB image.

    Non-RGB inputs (grayscale, RGBA, palette, ...) are converted to RGB;
    images already in RGB mode are returned unchanged.
    """
    loaded = Image.open(f)
    return loaded if loaded.mode == 'RGB' else loaded.convert('RGB')
13
+
14
# Preprocessing for the CNN detectors (ConvNext / EfficientNet):
# take a random 256x256 crop, convert to a tensor, and apply the standard
# ImageNet channel normalization. The random crop makes each call
# non-deterministic — detect() averages over several crops to compensate.
_transform_test_random=transforms.Compose([
    transforms.RandomCrop((256,256)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
18
+
19
# Same pipeline as the CNN variant but with a 252x252 crop for the
# EVA-02 ViT model — presumably sized to match the ViT's patch grid
# (252 = 18 * 14); TODO confirm against the model's expected input size.
_transform_test_random_vit = transforms.Compose([
    transforms.RandomCrop((252,252)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
23
+
24
def detect(img, model_choices):
    """Score *img* with the selected AI-image detector and report the results.

    Args:
        img: PIL image, as delivered by the gradio ``Image(type="pil")`` input.
        model_choices: name of the detector to use — one of
            "EVA-02 ViT L/14", "ConvNext Large", "EfficientNet-V2 B0"
            (the gradio Radio value).

    Returns:
        A multi-line string containing the sigmoid scores of 10 random
        crops, their mean ("10 try method"), and a single-crop score
        ("1 try method").

    Raises:
        ValueError: if *model_choices* is not a recognized model name.
    """
    # NOTE(review): torch.load unpickles an entire model object. That is
    # acceptable for these bundled checkpoints, but never point it at
    # untrusted files — pickle deserialization can execute arbitrary code.
    if model_choices == "EVA-02 ViT L/14":
        model = torch.load("./model_eva.pth", map_location="cpu").cpu().eval()
        _transform = _transform_test_random_vit
    elif model_choices == "ConvNext Large":
        model = torch.load("./model_convnext.pth", map_location="cpu").cpu().eval()
        _transform = _transform_test_random
    elif model_choices == "EfficientNet-V2 B0":
        model = torch.load("./model_effnet.pth", map_location="cpu").cpu().eval()
        _transform = _transform_test_random
    else:
        # BUG FIX: the original fell through with `model`/`_transform`
        # unbound and crashed later with UnboundLocalError; fail fast
        # with a clear message instead.
        raise ValueError(f"Unknown model choice: {model_choices!r}")

    output = ""
    with torch.inference_mode():
        # Average over 10 random crops to damp the variance introduced
        # by RandomCrop.
        tmp = []
        for _ in range(10):
            img_random_crop = _transform(img)
            outputs = model(img_random_crop.unsqueeze(0))
            outputs = torch.sigmoid(outputs).cpu().numpy()
            tmp.append(outputs[0][0])
        output += f"{str(tmp)}\n"
        output += f"10 try method: {mean(tmp)}\n"

    with torch.inference_mode():
        # Single-crop score, shown alongside the averaged one for comparison.
        img_crop = _transform(img)
        outputs = model(img_crop.unsqueeze(0))
        outputs = torch.sigmoid(outputs).cpu().numpy()
        output += f"1 try method: {outputs}\n"
    return output
55
+
56
# Detector checkpoints offered in the radio selector.
model_choices = ["ConvNext Large", "EVA-02 ViT L/14", "EfficientNet-V2 B0"]

# Build the Gradio UI: an image upload and a model selector feed detect(),
# whose report is rendered as plain text.
image_input = gr.Image(type="pil", label="Input Image")
model_input = gr.Radio(
    model_choices,
    type="value",
    value="EVA-02 ViT L/14",
    label="Choose Detector Model",
)
demo = gr.Interface(fn=detect, inputs=[image_input, model_input], outputs="text")
demo.launch()