Update app.py
app.py
CHANGED
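Summary of the change: every embedding and image tensor handed to the try-on pipeline is now cast to torch.float16 on the target device, so the inputs match the half-precision weights the pipeline runs with.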
@@ -156,6 +156,7 @@ def generate_virtual_try_on(person_image, cloth_image, mask_image, pose_image,cl
     cloth_pure = transform(cloth_image).unsqueeze(0).to(device)
     mask_tensor = to_tensor(mask_image)[:1].unsqueeze(0).to(device)  # Keep only one channel
     pose_tensor = transform(pose_image).unsqueeze(0).to(device)
+

     # Prepare text prompts
     prompt = ["A person wearing the cloth"+cloth_des]  # Example prompt
@@ -197,22 +198,22 @@ def generate_virtual_try_on(person_image, cloth_image, mask_image, pose_image,cl

     with torch.no_grad():
         images = pipe(
-            prompt_embeds=prompt_embeds,
-            negative_prompt_embeds=negative_prompt_embeds,
-            pooled_prompt_embeds=pooled_prompt_embeds,
-            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            prompt_embeds=prompt_embeds.to(device, torch.float16),
+            negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
+            pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device, torch.float16),
             num_inference_steps=args.num_inference_steps,
             generator=generator,
             strength=1.0,
-            pose_img=pose_tensor,
-            text_embeds_cloth=prompt_embeds_c,
-            cloth=cloth_pure,
-            mask_image=mask_tensor,
+            pose_img=pose_tensor.to(device, torch.float16),
+            text_embeds_cloth=prompt_embeds_c.to(device, torch.float16),
+            cloth=cloth_pure.to(device, torch.float16),
+            mask_image=mask_tensor.to(device, torch.float16),
             image=(person_tensor + 1.0) / 2.0,
             height=args.height,
             width=args.width,
             guidance_scale=guidance_scale,
-            ip_adapter_image=image_embeds,
+            ip_adapter_image=image_embeds.to(device, torch.float16),
         )[0]

         # Convert output image to PIL format for display
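For context: when a diffusers pipeline is loaded in half precision, every tensor passed into its call must already sit on the same device with dtype torch.float16, otherwise PyTorch aborts mid-forward with errors like "RuntimeError: expected scalar type Half but found Float". The casts above apply that rule argument by argument. Below is a minimal sketch of the same pattern factored into one place; pipe, device, and the argument names come from the diff, while cast_inputs is a hypothetical helper, not part of this Space.

import torch

def cast_inputs(device, dtype, **tensors):
    # Hypothetical helper (not in app.py): move every tensor keyword
    # argument onto the pipeline's device and dtype in one place, so no
    # stray float32 input can slip into the float16 forward pass.
    return {name: t.to(device, dtype) for name, t in tensors.items()}

# Usage mirroring the call above (names as in app.py):
# images = pipe(
#     **cast_inputs(device, torch.float16,
#                   prompt_embeds=prompt_embeds,
#                   pose_img=pose_tensor,
#                   cloth=cloth_pure,
#                   mask_image=mask_tensor,
#                   ip_adapter_image=image_embeds),
#     image=(person_tensor + 1.0) / 2.0,
#     guidance_scale=guidance_scale,
# )[0]

Note that image=(person_tensor + 1.0) / 2.0 stays outside the cast, matching the diff, which passes that argument through at its original dtype.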