Update app.py
app.py CHANGED

@@ -170,7 +170,6 @@ def save_image(img):
 
 @spaces.GPU
 def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie = 'upper_body'):
-
     device = "cuda"
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
@@ -183,10 +182,10 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
         width, height = human_img_orig.size
         target_width = int(min(width, height * (3 / 4)))
         target_height = int(min(height, width * (4 / 3)))
-        left = (width - target_width)
-        top = (height - target_height)
-        right = (width + target_width)
-        bottom = (height + target_height)
+        left = (width - target_width) / 2
+        top = (height - target_height) / 2
+        right = (width + target_width) / 2
+        bottom = (height + target_height) / 2
         cropped_img = human_img_orig.crop((left, top, right, bottom))
         crop_size = cropped_img.size
         human_img = cropped_img.resize((768, 1024))
@@ -196,19 +195,17 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
     if is_checked:
         keypoints = openpose_model(human_img.resize((384, 512)))
         model_parse, _ = parsing_model(human_img.resize((384, 512)))
-        mask, mask_gray = get_mask_location('hd', categorie, model_parse, keypoints)
+        mask, mask_gray = get_mask_location('hd', categorie , model_parse, keypoints)
         mask = mask.resize((768, 1024))
     else:
         mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
-
     mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
 
-    # Potentially modify args.func to accept PIL images directly
     human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
-    args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts',
+    args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
    pose_img = args.func(args, human_img_arg)
    pose_img = pose_img[:, :, ::-1]
    pose_img = Image.fromarray(pose_img).resize((768, 1024))
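The main fix in this commit is the division by 2 in the crop offsets, which centers the 3:4 crop box on the person image instead of starting it at the full offset and running past the image bounds. Below is a minimal sketch of that centered crop, assuming a PIL input image; the helper name center_crop_3_4 is illustrative and not part of app.py:

from PIL import Image

def center_crop_3_4(img: Image.Image) -> Image.Image:
    # Illustrative helper (not from app.py): reproduce the centered 3:4 crop
    # that the updated start_tryon() applies before resizing to 768x1024.
    width, height = img.size
    target_width = int(min(width, height * (3 / 4)))
    target_height = int(min(height, width * (4 / 3)))
    # Dividing by 2 centers the crop box; without it, (left, top) sits at the
    # full offset and (right, bottom) falls outside the image.
    left = (width - target_width) / 2
    top = (height - target_height) / 2
    right = (width + target_width) / 2
    bottom = (height + target_height) / 2
    return img.crop((left, top, right, bottom)).resize((768, 1024))

For a 1000x1000 input this yields a 750x1000 box at (125, 0, 875, 1000), so the subsequent resize to 768x1024 keeps the 3:4 aspect ratio the pipeline expects.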