import gradio as gr
import torch
from diffusers import DiffusionPipeline
from PIL import Image
# Load the pipeline
pipeline = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell")
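
# Note (an assumption, not part of the original file): FLUX.1-schnell is a ~12B-parameter
# model, so loading it in full precision on CPU-only hardware typically exhausts memory.
# On a GPU-backed Space, the usual pattern is to load in bfloat16 and move the pipeline
# to CUDA, e.g.:
#   pipeline = DiffusionPipeline.from_pretrained(
#       "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
#   ).to("cuda")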
def merge_images(model_image, cloth_image):
    # Save uploaded images temporarily
    model_image.save("model_image.png")
    cloth_image.save("cloth_image.png")

    # Create prompt
    prompt = "A model wearing the clothing from the second image model_image.png and cloth_image.png"

    # Note: the pipeline call below only receives the prompt text; the saved image files
    # are not passed to (or read by) the text-to-image pipeline.
    # Generate the merged image
    output_image = pipeline(
        prompt,
        guidance_scale=0.8,
        num_inference_steps=4,
        max_sequence_length=256,
        generator=torch.Generator("cpu").manual_seed(0)
    ).images[0]
    return output_image
# Create the Gradio interface
iface = gr.Interface(
    fn=merge_images,
    inputs=[
        gr.Image(type="pil", label="Upload model image"),
        gr.Image(type="pil", label="Upload clothing image")
    ],
    outputs=gr.Image(type="pil", label="Result"),
    title="Virtual Clothing Try-On",
    description="Upload a model image and a clothing image to see how the outfit looks on the model."
)
# Launch the interface
iface.launch()
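
# A minimal local smoke test (hypothetical; the file names below are placeholders, not
# part of the original Space) that calls merge_images directly with two PIL images
# instead of going through the Gradio UI:
#   from PIL import Image
#   result = merge_images(Image.open("model.png"), Image.open("cloth.png"))
#   result.save("result.png")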