Spaces:
Saad0KH
/
Running on Zero

Saad0KH committed on
Commit
228b169
·
verified ·
1 Parent(s): 42bd0db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -16
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  import requests
3
  from flask import Flask, request, jsonify,send_file
4
  from PIL import Image
@@ -132,7 +133,8 @@ def save_image(img):
132
  return unique_name
133
 
134
  @spaces.GPU
135
- def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie = 'upper_body'):
 
136
  device = "cuda"
137
  openpose_model.preprocessor.body_estimation.model.to(device)
138
  pipe.to(device)
@@ -156,20 +158,27 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
156
  human_img = human_img_orig.resize((768, 1024))
157
 
158
  if is_checked:
159
- keypoints = openpose_model(human_img.resize((384, 512)))
160
- model_parse, _ = parsing_model(human_img.resize((384, 512)))
161
- mask, mask_gray = get_mask_location('hd', categorie , model_parse, keypoints)
 
162
  mask = mask.resize((768, 1024))
163
  else:
164
- mask = dict['layers'][0].convert("RGB").resize((768, 1024))#pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
 
165
  mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
166
  mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
167
 
168
  human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
169
  human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
170
 
171
- args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
172
- pose_img = args.func(args, human_img_arg)
 
 
 
 
 
173
  pose_img = pose_img[:, :, ::-1]
174
  pose_img = Image.fromarray(pose_img).resize((768, 1024))
175
 
@@ -212,7 +221,10 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
212
  pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device, torch.float16)
213
  garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device, torch.float16)
214
  generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
215
- images = pipe(
 
 
 
216
  prompt_embeds=prompt_embeds.to(device, torch.float16),
217
  negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
218
  pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
@@ -236,13 +248,12 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
236
  human_img_orig.paste(out_img, (int(left), int(top)))
237
  return human_img_orig, mask_gray
238
  else:
239
- return images[0], mask_gray , mask
240
 
241
-
242
- @app.route('/tryon-v2', methods=['POST'])
243
- def tryon_v2():
244
 
245
- data = request.json
 
 
246
  human_image_data = data['human_image']
247
  garment_image_data = data['garment_image']
248
 
@@ -267,11 +278,13 @@ def tryon_v2():
267
  'composite': None
268
  }
269
 
270
- output_image, mask_image , mask = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
 
 
271
  return jsonify({
272
  'image_id': save_image(output_image),
273
- 'mask_gray_id' : save_image(mask_image),
274
- 'mask_id' : save_image(mask)
275
  })
276
 
277
  def clear_gpu_memory():
 
1
  import os
2
+ import asyncio
3
  import requests
4
  from flask import Flask, request, jsonify,send_file
5
  from PIL import Image
 
133
  return unique_name
134
 
135
  @spaces.GPU
136
+ # Assurez-vous que les fonctions de traitement utilisent asyncio
137
+ async def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie='upper_body'):
138
  device = "cuda"
139
  openpose_model.preprocessor.body_estimation.model.to(device)
140
  pipe.to(device)
 
158
  human_img = human_img_orig.resize((768, 1024))
159
 
160
  if is_checked:
161
+ # Utilisation de asyncio pour rendre le calcul non bloquant
162
+ keypoints = await asyncio.to_thread(openpose_model, human_img.resize((384, 512)))
163
+ model_parse, _ = await asyncio.to_thread(parsing_model, human_img.resize((384, 512)))
164
+ mask, mask_gray = await asyncio.to_thread(get_mask_location, 'hd', categorie, model_parse, keypoints)
165
  mask = mask.resize((768, 1024))
166
  else:
167
+ mask = dict['layers'][0].convert("RGB").resize((768, 1024))
168
+
169
  mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
170
  mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
171
 
172
  human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
173
  human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
174
 
175
+ # Création des arguments de façon non bloquante
176
+ args = await asyncio.to_thread(
177
+ apply_net.create_argument_parser().parse_args,
178
+ ('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda')
179
+ )
180
+
181
+ pose_img = await asyncio.to_thread(args.func, args, human_img_arg)
182
  pose_img = pose_img[:, :, ::-1]
183
  pose_img = Image.fromarray(pose_img).resize((768, 1024))
184
 
 
221
  pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device, torch.float16)
222
  garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device, torch.float16)
223
  generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
224
+
225
+ # L'appel principal au modèle doit également être asynchrone
226
+ images = await asyncio.to_thread(
227
+ pipe,
228
  prompt_embeds=prompt_embeds.to(device, torch.float16),
229
  negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
230
  pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
 
248
  human_img_orig.paste(out_img, (int(left), int(top)))
249
  return human_img_orig, mask_gray
250
  else:
251
+ return images[0], mask_gray, mask
252
 
 
 
 
253
 
254
+ @app.route('/tryon-v2', methods=['POST'])
255
+ async def tryon_v2():
256
+ data = await request.json
257
  human_image_data = data['human_image']
258
  garment_image_data = data['garment_image']
259
 
 
278
  'composite': None
279
  }
280
 
281
+ # Exécuter le traitement principal de manière asynchrone
282
+ output_image, mask_image, mask = await start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
283
+
284
  return jsonify({
285
  'image_id': save_image(output_image),
286
+ 'mask_gray_id': save_image(mask_image),
287
+ 'mask_id': save_image(mask)
288
  })
289
 
290
  def clear_gpu_memory():