Saad0KH committed · verified · Commit 3ce4611 · 1 Parent(s): c4c6cd9

Update app.py

Files changed (1):
  1. app.py (+6 -6)
app.py CHANGED
@@ -175,7 +175,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     pipe.to(device)
     pipe.unet_encoder.to(device)
 
-    garm_img = garm_img.convert("RGB").resize((384, 512))
+    garm_img = garm_img.convert("RGB").resize((768, 1024))
     human_img_orig = dict["background"].convert("RGB")
 
     if is_checked_crop:
@@ -188,9 +188,9 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
         bottom = (height + target_height) / 2
         cropped_img = human_img_orig.crop((left, top, right, bottom))
         crop_size = cropped_img.size
-        human_img = cropped_img.resize((384, 512))
+        human_img = cropped_img.resize((768, 1024))
     else:
-        human_img = human_img_orig.resize((384, 512))
+        human_img = human_img_orig.resize((768, 1024))
 
     if is_checked:
         keypoints = openpose_model(human_img.resize((384, 512)))
@@ -198,7 +198,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
         mask, mask_gray = get_mask_location('hd', categorie , model_parse, keypoints)
         mask = mask.resize((768, 1024))
     else:
-        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((384, 512)))
+        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
     mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
 
@@ -208,7 +208,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
     pose_img = args.func(args, human_img_arg)
     pose_img = pose_img[:, :, ::-1]
-    pose_img = Image.fromarray(pose_img).resize((384, 512))
+    pose_img = Image.fromarray(pose_img).resize((768, 1024))
 
     with torch.no_grad():
         with torch.cuda.amp.autocast():
@@ -264,7 +264,7 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
                 image=human_img,
                 height=1024,
                 width=768,
-                ip_adapter_image=garm_img.resize((384, 512)),
+                ip_adapter_image=garm_img.resize((768, 1024)),
                 guidance_scale=2.0,
             )[0]
 
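Taken together, the six changed lines raise the working resolution of every image start_tryon hands to the try-on pipeline — the garment image, the (optionally cropped) person image, the hand-drawn inpainting mask, the DensePose pose map, and the IP-Adapter conditioning image — from 384×512 to 768×1024, which matches the width=768 / height=1024 the pipeline call already requests. The OpenPose keypoint pass is unchanged context and still runs on its own 384×512 downscale (human_img.resize((384, 512))). A minimal sketch of the resulting preprocessing contract, with a hypothetical helper name (app.py keeps this logic inline inside start_tryon):

    from PIL import Image

    # Working size after commit 3ce4611. PIL's resize takes (width, height),
    # so (768, 1024) lines up with the pipeline's width=768, height=1024.
    WORKING_SIZE = (768, 1024)  # previously (384, 512)

    def prepare_images(garm_img: Image.Image, human_img_orig: Image.Image):
        """Hypothetical helper mirroring the inline preprocessing in start_tryon."""
        garm_img = garm_img.convert("RGB").resize(WORKING_SIZE)
        human_img = human_img_orig.convert("RGB").resize(WORKING_SIZE)
        return garm_img, human_img

At 768×1024 each input carries four times the pixels of 384×512, and the pipeline presumably no longer needs to upsample its conditioning images 2× internally to reach the requested 768×1024 output.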