skallewag committed on
Commit 5490a38 · verified · 1 Parent(s): 0efc456

Create app.py

Files changed (1)
  1. app.py +155 -0
app.py ADDED
@@ -0,0 +1,155 @@
+ # --------------------------------------------------------
+ # SEEM -- Segment Everything Everywhere All At Once
+ # Copyright (c) 2022 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
+ # --------------------------------------------------------
+
+ import os
+ import argparse
+
+ import gradio as gr
+ import torch
+ import whisper
+
+ from modeling.BaseModel import BaseModel
+ from modeling import build_model
+ from utils.distributed import init_distributed
+ from utils.arguments import load_opt_from_config_files
+ from utils.constants import COCO_PANOPTIC_CLASSES
+
+ from demo.seem.tasks import *
+
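+ # NOTE: this demo targets the Gradio 3.x API (gr.inputs / gr.outputs and the
+ # source=/tool= constructor kwargs used below); those entry points were
+ # removed in Gradio 4+, so the file will not run there unmodified.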
+ def parse_option():
+     parser = argparse.ArgumentParser('SEEM Demo', add_help=False)
+     parser.add_argument('--conf_files', default="configs/seem/focall_unicl_lang_demo.yaml", metavar="FILE", help='path to config file')
+     cfg = parser.parse_args()
+     return cfg
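+ # Example invocation (the config path is the repository default):
+ #   python app.py --conf_files configs/seem/focall_unicl_lang_demo.yaml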
+
+ '''
+ build args
+ '''
+ cfg = parse_option()
+ opt = load_opt_from_config_files([cfg.conf_files])
+ opt = init_distributed(opt)
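+ # Presumably load_opt_from_config_files merges the YAML config(s) into one
+ # options dict and init_distributed fills in device/rank defaults; both are
+ # helpers from this repo's utils package, not a third-party library.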
+
+ # META DATA: pick the checkpoint matching the config and download it on first run.
+ cur_model = 'None'
+ if 'focalt' in cfg.conf_files:
+     pretrained_pth = "seem_focalt_v0.pt"
+     if not os.path.exists(pretrained_pth):
+         os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focalt_v0.pt"))
+     cur_model = 'Focal-T'
+ elif 'focal' in cfg.conf_files:
+     pretrained_pth = "seem_focall_v0.pt"
+     if not os.path.exists(pretrained_pth):
+         os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focall_v0.pt"))
+     cur_model = 'Focal-L'
+ else:
+     raise ValueError("conf_files must name a focalt or focall config: {}".format(cfg.conf_files))
+
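+ # 'focalt' is tested before the more general 'focal' substring; swapping the
+ # branches would route Focal-T configs to the Focal-L checkpoint.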
+ '''
+ build model
+ '''
+ model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth).eval().cuda()
+ with torch.no_grad():
+     model.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True)
+
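+ # The text embeddings for the COCO panoptic vocabulary (plus "background")
+ # are computed once here so panoptic inference does not have to re-encode
+ # the class names on every request.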
+ '''
+ audio
+ '''
+ audio = whisper.load_model("base")
+
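+ # Whisper transcribes spoken referring prompts into text; "base" is one of
+ # the smaller multilingual checkpoints, presumably chosen for load time.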
+ @torch.no_grad()
+ def inference(image, task, *args, **kwargs):
+     with torch.autocast(device_type='cuda', dtype=torch.float16):
+         if 'Video' in task:
+             return interactive_infer_video(model, audio, image, task, *args, **kwargs)
+         else:
+             return interactive_infer_image(model, audio, image, task, *args, **kwargs)
+
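+ # interactive_infer_video / interactive_infer_image come from the
+ # demo.seem.tasks wildcard import; everything runs under no_grad and fp16
+ # autocast to keep GPU memory in check.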
+ class ImageMask(gr.components.Image):
+     """
+     Image input pinned to source="upload", tool="sketch" so users can draw
+     stroke prompts directly on the uploaded image.
+     """
+
+     is_template = True
+
+     def __init__(self, **kwargs):
+         super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
+
+     def preprocess(self, x):
+         return super().preprocess(x)
+
+ class Video(gr.components.Video):
+     """
+     Video input pinned to source="upload".
+     """
+
+     is_template = True
+
+     def __init__(self, **kwargs):
+         super().__init__(source="upload", **kwargs)
+
+     def preprocess(self, x):
+         return super().preprocess(x)
+
+
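+ # Note: the Video subclass is defined but unused; the interface below
+ # instantiates gr.Video directly. Rough Gradio 4+ equivalents would be
+ # gr.ImageEditor for ImageMask and plain gr.Video for uploads.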
+ '''
+ launch app
+ '''
+ title = "SEEM: Segment Everything Everywhere All At Once"
+ description = """
+ <div style="text-align: center; font-weight: bold;">
+ <span style="font-size: 18px" id="paper-info">
+ [<a href="https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once" target="_blank">GitHub</a>]
+ [<a href="https://arxiv.org/pdf/2304.06718.pdf" target="_blank">arXiv</a>]
+ </span>
+ </div>
+ <div style="text-align: left; font-weight: bold;">
+ <br>
+ <p>
+ &#x1F32A; Note: the current model runs on <span style="color:blue;">SEEM {}</span>; for <span style="color:blue;">best performance</span>, refer to <a href="https://huggingface.co/spaces/xdecoder/SEEM" target="_blank"><span style="color:red;">our demo</span></a>.
+ </p>
+ </div>
+ """.format(cur_model)
+
+ '''Usage
+ Instructions:
+ &#x1F388; Try our default examples first (the sketch is not automatically drawn on the input and example images);
+ &#x1F388; The video demo takes about 30-60 s to process; please refresh if you hit an error while uploading;
+ &#x1F388; Upload an image/video (to use a referred region of another image, check "Example" and upload that image in the referring-image panel);
+ &#x1F388; Select at least one prompt type (to use a referred region of another image, check "Example");
+ &#x1F388; Provide the actual prompt for every prompt type you select, otherwise you will hit an error (e.g., remember to draw on the referring image);
+ &#x1F388; The model supports the 133 COCO categories by default; anything else will be labeled 'others' or misclassified.
+ '''
+
+ article = "This demo runs on SEEM {}.".format(cur_model)
+ inputs = [
+     ImageMask(label="[Stroke] Draw on Image", type="pil"),
+     gr.inputs.CheckboxGroup(choices=["Stroke", "Example", "Text", "Audio", "Video", "Panoptic"], type="value", label="Interactive Mode"),
+     ImageMask(label="[Example] Draw on Referring Image", type="pil"),
+     gr.Textbox(label="[Text] Referring Text"),
+     gr.Audio(label="[Audio] Referring Audio", source="microphone", type="filepath"),
+     gr.Video(label="[Video] Referring Video Segmentation", format="mp4", interactive=True),
+ ]
+ gr.Interface(
+     fn=inference,
+     inputs=inputs,
+     outputs=[
+         gr.outputs.Image(
+             type="pil",
+             label="Segmentation Results (COCO classes as label)"),
+         gr.Video(
+             label="Video Segmentation Results (COCO classes as label)", format="mp4"
+         ),
+     ],
+     examples=[
+         ["demo/seem/examples/corgi1.webp", ["Text"], "demo/seem/examples/corgi2.jpg", "The corgi.", None, None],
+         ["demo/seem/examples/river1.png", ["Text", "Audio"], "demo/seem/examples/river2.png", "The green trees.", "demo/seem/examples/river1.wav", None],
+         ["demo/seem/examples/zebras1.jpg", ["Example"], "demo/seem/examples/zebras2.jpg", "", None, None],
+         ["demo/seem/examples/fries1.png", ["Example"], "demo/seem/examples/fries2.png", "", None, None],
+         ["demo/seem/examples/placeholder.png", ["Video"], "demo/seem/examples/ref_vase.JPG", "", None, "demo/seem/examples/vasedeck.mp4"],
+     ],
+     title=title,
+     description=description,
+     article=article,
+     allow_flagging='never',
+     cache_examples=False,
+ ).launch(share=True)