Spaces: Saad0KH · Running on Zero

Saad0KH committed · verified · Commit 312e8fd · 1 parent: 5131782

Update app.py

Files changed (1): app.py (+10 -10)
app.py CHANGED
@@ -139,7 +139,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     pipe.to(device)
     pipe.unet_encoder.to(device)
 
-    garm_img = garm_img.convert("RGB").resize((705,705))
+    garm_img = garm_img.convert("RGB").resize((768, 768))
     human_img_orig = dict["background"].convert("RGB")
 
     if is_checked_crop:
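All of the pipeline's intermediate resizes move from 705×705 to 768×768 in this commit, likely to match the pipeline's 768-pixel generation width; 768 is also divisible by 8, as SD-style VAEs require of their latent dimensions, whereas 705 is not. A minimal sketch of centralizing the repeated literal (TARGET_SIZE and to_model_size are illustrative names, not part of this Space):

    from PIL import Image

    # Hypothetical helper: one shared working resolution for the garment,
    # person, mask and pose inputs, instead of repeating the literal.
    TARGET_SIZE = (768, 768)  # divisible by 8, unlike the old 705

    def to_model_size(img: Image.Image) -> Image.Image:
        """Resize any PIL input to the pipeline's working resolution."""
        return img.resize(TARGET_SIZE)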
@@ -152,17 +152,17 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
         bottom = (height + target_height) / 2
         cropped_img = human_img_orig.crop((left, top, right, bottom))
         crop_size = cropped_img.size
-        human_img = cropped_img.resize((705,705))
+        human_img = cropped_img.resize((768, 768))
     else:
-        human_img = human_img_orig.resize((705,705))
+        human_img = human_img_orig.resize((768, 768))
 
     if is_checked:
         keypoints = openpose_model(human_img.resize((384, 512)))
         model_parse, _ = parsing_model(human_img.resize((384, 512)))
         mask, mask_gray = get_mask_location('hd', categorie , model_parse, keypoints)
-        mask = mask.resize((705,705))
+        mask = mask.resize((768, 768))
     else:
-        mask = dict['layers'][0].convert("RGB").resize((705,705))#pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((705,705)))
+        mask = dict['layers'][0].convert("RGB").resize((768, 768))#pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 768)))
     mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
 
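In the manual-mask branch only the size literal changes: pil_to_binary_mask stays commented out, so the editor layer is still used as a plain RGB image rather than a thresholded mask. For reference, a thresholding helper along the lines of that commented-out call could look like this (pil_to_binary_mask_sketch is an assumed name, not the Space's actual helper):

    from PIL import Image

    def pil_to_binary_mask_sketch(img: Image.Image, threshold: int = 127) -> Image.Image:
        """Collapse an RGB layer into a hard 0/255 single-channel mask."""
        gray = img.convert("L")  # keep luminance only
        return gray.point(lambda p: 255 if p > threshold else 0)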
@@ -172,7 +172,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
     pose_img = args.func(args, human_img_arg)
     pose_img = pose_img[:, :, ::-1]
-    pose_img = Image.fromarray(pose_img).resize((705,705))
+    pose_img = Image.fromarray(pose_img).resize((768, 768))
 
     with torch.no_grad():
         with torch.cuda.amp.autocast():
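Only the final resize changes in the DensePose step; pose_img[:, :, ::-1] still reverses the channel axis (BGR to RGB) before the array is handed to PIL. A self-contained sketch of that post-processing, with a dummy array standing in for the args.func(...) output:

    import numpy as np
    from PIL import Image

    bgr = np.zeros((512, 384, 3), dtype=np.uint8)  # stand-in for the DensePose output
    rgb = np.ascontiguousarray(bgr[:, :, ::-1])    # flip BGR -> RGB for PIL
    pose_pil = Image.fromarray(rgb).resize((768, 768))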
@@ -226,10 +226,10 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
                         cloth=garm_tensor.to(device, torch.float16),
                         mask_image=mask,
                         image=human_img,
-                        height=1024,
+                        height=768,
                         width=768,
-                        ip_adapter_image=garm_img.resize((705,705)),
-                        guidance_scale=2.0,
+                        ip_adapter_image=garm_img.resize((768, 768)),
+                        guidance_scale=6.0,
                     )[0]
 
     if is_checked_crop:
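Two generation parameters change in this call: height drops from 1024 to 768, so the try-on is produced at square 768×768 matching the new working resolution, and guidance_scale rises from 2.0 to 6.0. As a reminder of what the scale does in diffusers-style classifier-free guidance (a sketch with stand-in arrays, not this pipeline's internals):

    import numpy as np

    guidance_scale = 6.0
    noise_uncond = np.zeros(4)  # stand-in for the unconditional UNet prediction
    noise_text = np.ones(4)     # stand-in for the conditioned prediction
    noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)

Higher scales follow the prompt and garment conditioning more closely, typically at some cost in output diversity.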
@@ -353,7 +353,7 @@ def generate_mask(human_img, categorie='upper_body'):
     keypoints = openpose_model(human_img_resized)
     model_parse, _ = parsing_model(human_img_resized)
     mask, mask_gray = get_mask_location('hd', categorie , model_parse, keypoints)
-    mask = mask.resize((705,705))
+    mask = mask.resize((768, 768))
 
     # Resize the mask back to the original image size
     mask_resized = mask.resize(human_img.size)
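In generate_mask the new 768×768 resize is immediately followed by the existing resize back to human_img.size, so aside from resampling differences the two calls form an up-then-down round trip. A sketch of the resulting flow (sizes are stand-ins):

    from PIL import Image

    mask = Image.new("L", (384, 512))            # stand-in for get_mask_location output
    mask_768 = mask.resize((768, 768))           # fixed working resolution
    mask_resized = mask_768.resize((900, 1200))  # stand-in for human_img.size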
 