chenlei committed
Commit 8c19fbe · Parent(s): bfa0351
update

Files changed:
- ootd/inference_ootd_dc.py (+4 -4)
- run/gradio_ootd.py (+119 -119)
ootd/inference_ootd_dc.py CHANGED
@@ -24,10 +24,10 @@ import torch.nn.functional as F
 from transformers import AutoProcessor, CLIPVisionModelWithProjection
 from transformers import CLIPTextModel, CLIPTokenizer
 
-VIT_PATH = "
-VAE_PATH = "
-UNET_PATH = "
-MODEL_PATH =
+VIT_PATH = "openai/clip-vit-large-patch14"
+VAE_PATH = "levihsu/OOTDiffusion"
+UNET_PATH = "levihsu/OOTDiffusion"
+MODEL_PATH ="/home/user/app/checkpoints/ootd"
 
 class OOTDiffusionDC:
 
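The retargeted constants point at Hub repo IDs (openai/clip-vit-large-patch14, levihsu/OOTDiffusion) plus a checkpoint path baked into the Space container. A minimal sketch of how such constants are typically consumed, assuming the usual transformers from_pretrained flow (the call sites themselves are not shown in this diff):

# Sketch only: assumed usage of the new constants together with the
# imports shown in the hunk above; the diff does not show the call sites.
from transformers import AutoProcessor, CLIPVisionModelWithProjection

VIT_PATH = "openai/clip-vit-large-patch14"

# from_pretrained accepts a Hub model ID (as here) or a local directory
# such as MODEL_PATH; weights are downloaded and cached on first use.
processor = AutoProcessor.from_pretrained(VIT_PATH)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(VIT_PATH)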
run/gradio_ootd.py CHANGED
@@ -21,9 +21,9 @@ openpose_model_hd = OpenPose(0)
 parsing_model_hd = Parsing(0)
 ootd_model_hd = OOTDiffusionHD(0)
 
-openpose_model_dc = OpenPose(0)
-parsing_model_dc = Parsing(0)
-ootd_model_dc = OOTDiffusionDC(0)
+# openpose_model_dc = OpenPose(0)
+# parsing_model_dc = Parsing(0)
+# ootd_model_dc = OOTDiffusionDC(0)
 
 
 category_dict = ['upperbody', 'lowerbody', 'dress']
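This hunk stops instantiating the dress-code (DC) pipeline at startup, leaving only the half-body (HD) models resident on the GPU. A hypothetical alternative, not part of this commit, is lazy construction with the same constructors, so the DC path could still be enabled on demand:

# Hypothetical alternative (not in this commit): construct the DC models
# on first use, with the constructors the file already imports, instead
# of commenting them out entirely.
_dc_models = None

def get_dc_models():
    global _dc_models
    if _dc_models is None:
        _dc_models = (OpenPose(0), Parsing(0), OOTDiffusionDC(0))
    return _dc_models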
@@ -67,41 +67,41 @@ def process_hd(vton_img, garm_img, n_samples, n_steps, image_scale, seed):
 
     return images
 
-def process_dc(vton_img, garm_img, category, n_samples, n_steps, image_scale, seed):
-    model_type = 'dc'
-    if category == 'Upper-body':
-        category = 0
-    elif category == 'Lower-body':
-        category = 1
-    else:
-        category =2
+# def process_dc(vton_img, garm_img, category, n_samples, n_steps, image_scale, seed):
+#     model_type = 'dc'
+#     if category == 'Upper-body':
+#         category = 0
+#     elif category == 'Lower-body':
+#         category = 1
+#     else:
+#         category =2
 
-    with torch.no_grad():
-        garm_img = Image.open(garm_img).resize((768, 1024))
-        vton_img = Image.open(vton_img).resize((768, 1024))
-        keypoints = openpose_model_dc(vton_img.resize((384, 512)))
-        model_parse, _ = parsing_model_dc(vton_img.resize((384, 512)))
+#     with torch.no_grad():
+#         garm_img = Image.open(garm_img).resize((768, 1024))
+#         vton_img = Image.open(vton_img).resize((768, 1024))
+#         keypoints = openpose_model_dc(vton_img.resize((384, 512)))
+#         model_parse, _ = parsing_model_dc(vton_img.resize((384, 512)))
 
-        mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
-        mask = mask.resize((768, 1024), Image.NEAREST)
-        mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)
+#         mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
+#         mask = mask.resize((768, 1024), Image.NEAREST)
+#         mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)
 
-        masked_vton_img = Image.composite(mask_gray, vton_img, mask)
+#         masked_vton_img = Image.composite(mask_gray, vton_img, mask)
 
-        images = ootd_model_dc(
-            model_type=model_type,
-            category=category_dict[category],
-            image_garm=garm_img,
-            image_vton=masked_vton_img,
-            mask=mask,
-            image_ori=vton_img,
-            num_samples=n_samples,
-            num_steps=n_steps,
-            image_scale=image_scale,
-            seed=seed,
-        )
+#         images = ootd_model_dc(
+#             model_type=model_type,
+#             category=category_dict[category],
+#             image_garm=garm_img,
+#             image_vton=masked_vton_img,
+#             mask=mask,
+#             image_ori=vton_img,
+#             num_samples=n_samples,
+#             num_steps=n_steps,
+#             image_scale=image_scale,
+#             seed=seed,
+#         )
 
-    return images
+#     return images
 
 
 block = gr.Blocks().queue()
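The disabled process_dc preserved the standard OOTDiffusion flow: resize to 768×1024, run pose and parsing at 384×512, build a garment-region mask, gray out the masked region, and inpaint. The compositing step uses plain PIL semantics; a self-contained illustration with stand-in images (not the app's real inputs):

from PIL import Image

# Image.composite(a, b, mask) takes pixels from `a` where the mask is
# white and from `b` where it is black; process_dc used it to gray out
# the garment region of the person image before inpainting.
vton_img = Image.new('RGB', (768, 1024), 'white')           # stand-in person image
mask_gray = Image.new('RGB', (768, 1024), (128, 128, 128))  # gray fill
mask = Image.new('L', (768, 1024), 0)                       # black = keep person pixels
mask.paste(255, (200, 300, 560, 800))                       # white box = region to replace
masked_vton_img = Image.composite(mask_gray, vton_img, mask)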
@@ -169,92 +169,92 @@ with block:
     run_button.click(fn=process_hd, inputs=ips, outputs=[result_gallery])
 
 
-    with gr.Row():
-        gr.Markdown("## Full-body")
-    with gr.Row():
-        gr.Markdown("***Support upper-body/lower-body/dresses; garment category must be paired!!!***")
-    with gr.Row():
-        with gr.Column():
-            vton_img_dc = gr.Image(label="Model", sources='upload', type="filepath", height=384, value=model_dc)
-            example = gr.Examples(
-                label="Examples (upper-body/lower-body)",
-                inputs=vton_img_dc,
-                examples_per_page=7,
-                examples=[
-                    os.path.join(example_path, 'model/model_8.png'),
-                    os.path.join(example_path, 'model/049447_0.jpg'),
-                    os.path.join(example_path, 'model/049713_0.jpg'),
-                    os.path.join(example_path, 'model/051482_0.jpg'),
-                    os.path.join(example_path, 'model/051918_0.jpg'),
-                    os.path.join(example_path, 'model/051962_0.jpg'),
-                    os.path.join(example_path, 'model/049205_0.jpg'),
-                ])
-            example = gr.Examples(
-                label="Examples (dress)",
-                inputs=vton_img_dc,
-                examples_per_page=7,
-                examples=[
-                    os.path.join(example_path, 'model/model_9.png'),
-                    os.path.join(example_path, 'model/052767_0.jpg'),
-                    os.path.join(example_path, 'model/052472_0.jpg'),
-                    os.path.join(example_path, 'model/053514_0.jpg'),
-                    os.path.join(example_path, 'model/053228_0.jpg'),
-                    os.path.join(example_path, 'model/052964_0.jpg'),
-                    os.path.join(example_path, 'model/053700_0.jpg'),
-                ])
-        with gr.Column():
-            garm_img_dc = gr.Image(label="Garment", sources='upload', type="filepath", height=384, value=garment_dc)
-            category_dc = gr.Dropdown(label="Garment category (important option!!!)", choices=["Upper-body", "Lower-body", "Dress"], value="Upper-body")
-            example = gr.Examples(
-                label="Examples (upper-body)",
-                inputs=garm_img_dc,
-                examples_per_page=7,
-                examples=[
-                    os.path.join(example_path, 'garment/048554_1.jpg'),
-                    os.path.join(example_path, 'garment/049920_1.jpg'),
-                    os.path.join(example_path, 'garment/049965_1.jpg'),
-                    os.path.join(example_path, 'garment/049949_1.jpg'),
-                    os.path.join(example_path, 'garment/050181_1.jpg'),
-                    os.path.join(example_path, 'garment/049805_1.jpg'),
-                    os.path.join(example_path, 'garment/050105_1.jpg'),
-                ])
-            example = gr.Examples(
-                label="Examples (lower-body)",
-                inputs=garm_img_dc,
-                examples_per_page=7,
-                examples=[
-                    os.path.join(example_path, 'garment/051827_1.jpg'),
-                    os.path.join(example_path, 'garment/051946_1.jpg'),
-                    os.path.join(example_path, 'garment/051473_1.jpg'),
-                    os.path.join(example_path, 'garment/051515_1.jpg'),
-                    os.path.join(example_path, 'garment/051517_1.jpg'),
-                    os.path.join(example_path, 'garment/051988_1.jpg'),
-                    os.path.join(example_path, 'garment/051412_1.jpg'),
-                ])
-            example = gr.Examples(
-                label="Examples (dress)",
-                inputs=garm_img_dc,
-                examples_per_page=7,
-                examples=[
-                    os.path.join(example_path, 'garment/053290_1.jpg'),
-                    os.path.join(example_path, 'garment/053744_1.jpg'),
-                    os.path.join(example_path, 'garment/053742_1.jpg'),
-                    os.path.join(example_path, 'garment/053786_1.jpg'),
-                    os.path.join(example_path, 'garment/053790_1.jpg'),
-                    os.path.join(example_path, 'garment/053319_1.jpg'),
-                    os.path.join(example_path, 'garment/052234_1.jpg'),
-                ])
-        with gr.Column():
-            result_gallery_dc = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True, scale=1)
-        with gr.Column():
-            run_button_dc = gr.Button(value="Run")
-            n_samples_dc = gr.Slider(label="Images", minimum=1, maximum=4, value=1, step=1)
-            n_steps_dc = gr.Slider(label="Steps", minimum=20, maximum=40, value=20, step=1)
-            # scale_dc = gr.Slider(label="Scale", minimum=1.0, maximum=12.0, value=5.0, step=0.1)
-            image_scale_dc = gr.Slider(label="Guidance scale", minimum=1.0, maximum=5.0, value=2.0, step=0.1)
-            seed_dc = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=-1)
+    # with gr.Row():
+    #     gr.Markdown("## Full-body")
+    # with gr.Row():
+    #     gr.Markdown("***Support upper-body/lower-body/dresses; garment category must be paired!!!***")
+    # with gr.Row():
+    #     with gr.Column():
+    #         vton_img_dc = gr.Image(label="Model", sources='upload', type="filepath", height=384, value=model_dc)
+    #         example = gr.Examples(
+    #             label="Examples (upper-body/lower-body)",
+    #             inputs=vton_img_dc,
+    #             examples_per_page=7,
+    #             examples=[
+    #                 os.path.join(example_path, 'model/model_8.png'),
+    #                 os.path.join(example_path, 'model/049447_0.jpg'),
+    #                 os.path.join(example_path, 'model/049713_0.jpg'),
+    #                 os.path.join(example_path, 'model/051482_0.jpg'),
+    #                 os.path.join(example_path, 'model/051918_0.jpg'),
+    #                 os.path.join(example_path, 'model/051962_0.jpg'),
+    #                 os.path.join(example_path, 'model/049205_0.jpg'),
+    #             ])
+    #         example = gr.Examples(
+    #             label="Examples (dress)",
+    #             inputs=vton_img_dc,
+    #             examples_per_page=7,
+    #             examples=[
+    #                 os.path.join(example_path, 'model/model_9.png'),
+    #                 os.path.join(example_path, 'model/052767_0.jpg'),
+    #                 os.path.join(example_path, 'model/052472_0.jpg'),
+    #                 os.path.join(example_path, 'model/053514_0.jpg'),
+    #                 os.path.join(example_path, 'model/053228_0.jpg'),
+    #                 os.path.join(example_path, 'model/052964_0.jpg'),
+    #                 os.path.join(example_path, 'model/053700_0.jpg'),
+    #             ])
+    #     with gr.Column():
+    #         garm_img_dc = gr.Image(label="Garment", sources='upload', type="filepath", height=384, value=garment_dc)
+    #         category_dc = gr.Dropdown(label="Garment category (important option!!!)", choices=["Upper-body", "Lower-body", "Dress"], value="Upper-body")
+    #         example = gr.Examples(
+    #             label="Examples (upper-body)",
+    #             inputs=garm_img_dc,
+    #             examples_per_page=7,
+    #             examples=[
+    #                 os.path.join(example_path, 'garment/048554_1.jpg'),
+    #                 os.path.join(example_path, 'garment/049920_1.jpg'),
+    #                 os.path.join(example_path, 'garment/049965_1.jpg'),
+    #                 os.path.join(example_path, 'garment/049949_1.jpg'),
+    #                 os.path.join(example_path, 'garment/050181_1.jpg'),
+    #                 os.path.join(example_path, 'garment/049805_1.jpg'),
+    #                 os.path.join(example_path, 'garment/050105_1.jpg'),
+    #             ])
+    #         example = gr.Examples(
+    #             label="Examples (lower-body)",
+    #             inputs=garm_img_dc,
+    #             examples_per_page=7,
+    #             examples=[
+    #                 os.path.join(example_path, 'garment/051827_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051946_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051473_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051515_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051517_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051988_1.jpg'),
+    #                 os.path.join(example_path, 'garment/051412_1.jpg'),
+    #             ])
+    #         example = gr.Examples(
+    #             label="Examples (dress)",
+    #             inputs=garm_img_dc,
+    #             examples_per_page=7,
+    #             examples=[
+    #                 os.path.join(example_path, 'garment/053290_1.jpg'),
+    #                 os.path.join(example_path, 'garment/053744_1.jpg'),
+    #                 os.path.join(example_path, 'garment/053742_1.jpg'),
+    #                 os.path.join(example_path, 'garment/053786_1.jpg'),
+    #                 os.path.join(example_path, 'garment/053790_1.jpg'),
+    #                 os.path.join(example_path, 'garment/053319_1.jpg'),
+    #                 os.path.join(example_path, 'garment/052234_1.jpg'),
+    #             ])
+    #     with gr.Column():
+    #         result_gallery_dc = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True, scale=1)
+    #     with gr.Column():
+    #         run_button_dc = gr.Button(value="Run")
+    #         n_samples_dc = gr.Slider(label="Images", minimum=1, maximum=4, value=1, step=1)
+    #         n_steps_dc = gr.Slider(label="Steps", minimum=20, maximum=40, value=20, step=1)
+    #         # scale_dc = gr.Slider(label="Scale", minimum=1.0, maximum=12.0, value=5.0, step=0.1)
+    #         image_scale_dc = gr.Slider(label="Guidance scale", minimum=1.0, maximum=5.0, value=2.0, step=0.1)
+    #         seed_dc = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=-1)
 
-    ips_dc = [vton_img_dc, garm_img_dc, category_dc, n_samples_dc, n_steps_dc, image_scale_dc, seed_dc]
-    run_button_dc.click(fn=process_dc, inputs=ips_dc, outputs=[result_gallery_dc])
+    # ips_dc = [vton_img_dc, garm_img_dc, category_dc, n_samples_dc, n_steps_dc, image_scale_dc, seed_dc]
+    # run_button_dc.click(fn=process_dc, inputs=ips_dc, outputs=[result_gallery_dc])
 
 block.launch(server_name='0.0.0.0', server_port=7865)
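Because the DC widgets, process_dc, and the run_button_dc.click wiring are all commented out in the same commit, no reference dangles at import time. A hypothetical stub handler, assuming gradio's gr.Error, would be one way to keep the Full-body tab visible but inactive instead:

import gradio as gr

# Hypothetical stub (not in this commit): keep the Full-body tab wired
# up but fail gracefully, instead of commenting out the widgets.
def process_dc_disabled(*args):
    raise gr.Error("Full-body try-on is disabled in this deployment.")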