import numpy as np
from PIL import Image
from huggingface_hub import snapshot_download
from leffa.transform import LeffaTransform
from leffa.model import LeffaModel
from leffa.inference import LeffaInference
from leffa_utils.garment_agnostic_mask_predictor import AutoMasker
from leffa_utils.densepose_predictor import DensePosePredictor
from leffa_utils.utils import resize_and_center, list_dir, get_agnostic_mask_hd, get_agnostic_mask_dc
from preprocess.humanparsing.run_parsing import Parsing
from preprocess.openpose.run_openpose import OpenPose
import gradio as gr

# Download checkpoints
snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
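
# The ckpts/ layout assumed below (densepose/, schp/, humanparsing/,
# openpose/, the inpainting base models, and the task-specific .pth weights)
# is inferred from the paths used in LeffaPredictor, not from a published
# manifest of the snapshot.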


class LeffaPredictor(object):
    def __init__(self):
        # Preprocessing models: garment-agnostic masking, DensePose,
        # human parsing, and OpenPose keypoint detection.
        self.mask_predictor = AutoMasker(
            densepose_path="./ckpts/densepose",
            schp_path="./ckpts/schp",
        )
        self.densepose_predictor = DensePosePredictor(
            config_path="./ckpts/densepose/densepose_rcnn_R_50_FPN_s1x.yaml",
            weights_path="./ckpts/densepose/model_final_162be9.pkl",
        )
        self.parsing = Parsing(
            atr_path="./ckpts/humanparsing/parsing_atr.onnx",
            lip_path="./ckpts/humanparsing/parsing_lip.onnx",
        )
        self.openpose = OpenPose(
            body_model_path="./ckpts/openpose/body_pose_model.pth",
        )

        # Virtual try-on model trained on VITON-HD.
        vt_model_hd = LeffaModel(
            pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
            pretrained_model="./ckpts/virtual_tryon.pth",
            dtype="float16",
        )
        self.vt_inference_hd = LeffaInference(model=vt_model_hd)

        # Virtual try-on model trained on DressCode.
        vt_model_dc = LeffaModel(
            pretrained_model_name_or_path="./ckpts/stable-diffusion-inpainting",
            pretrained_model="./ckpts/virtual_tryon_dc.pth",
            dtype="float16",
        )
        self.vt_inference_dc = LeffaInference(model=vt_model_dc)

        # Pose transfer model trained on DeepFashion.
        pt_model = LeffaModel(
            pretrained_model_name_or_path="./ckpts/stable-diffusion-xl-1.0-inpainting-0.1",
            pretrained_model="./ckpts/pose_transfer.pth",
            dtype="float16",
        )
        self.pt_inference = LeffaInference(model=pt_model)
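
    # Note: all three diffusion pipelines are loaded up front in float16,
    # trading GPU memory for lower per-request latency.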

    def leffa_predict(
        self,
        src_image_path,
        ref_image_path,
        control_type,
        ref_acceleration=False,
        step=50,
        scale=2.5,
        seed=42,
        vt_model_type="viton_hd",
        vt_garment_type="upper_body",
        vt_repaint=False,
    ):
        assert control_type in [
            "virtual_tryon", "pose_transfer"], "Invalid control type: {}".format(control_type)
        src_image = Image.open(src_image_path)
        ref_image = Image.open(ref_image_path)
        src_image = resize_and_center(src_image, 768, 1024)
        ref_image = resize_and_center(ref_image, 768, 1024)
        src_image_array = np.array(src_image)

        # Garment-agnostic mask
        if control_type == "virtual_tryon":
            src_image = src_image.convert("RGB")
            model_parse, _ = self.parsing(src_image.resize((384, 512)))
            keypoints = self.openpose(src_image.resize((384, 512)))
            if vt_model_type == "viton_hd":
                mask = get_agnostic_mask_hd(
                    model_parse, keypoints, vt_garment_type)
            elif vt_model_type == "dress_code":
                mask = get_agnostic_mask_dc(
                    model_parse, keypoints, vt_garment_type)
            else:
                # Fail fast instead of hitting an unbound `mask` below.
                raise ValueError("Invalid model type: {}".format(vt_model_type))
            mask = mask.resize((768, 1024))
        elif control_type == "pose_transfer":
            # Pose transfer repaints the full frame, so the mask is all-white.
            mask = Image.fromarray(np.ones_like(src_image_array) * 255)

        # DensePose conditioning
        if control_type == "virtual_tryon":
            if vt_model_type == "viton_hd":
                src_image_seg_array = self.densepose_predictor.predict_seg(
                    src_image_array)[:, :, ::-1]
                src_image_seg = Image.fromarray(src_image_seg_array)
                densepose = src_image_seg
            elif vt_model_type == "dress_code":
                src_image_iuv_array = self.densepose_predictor.predict_iuv(
                    src_image_array)
                src_image_seg_array = src_image_iuv_array[:, :, 0:1]
                src_image_seg_array = np.concatenate(
                    [src_image_seg_array] * 3, axis=-1)
                src_image_seg = Image.fromarray(src_image_seg_array)
                densepose = src_image_seg
        elif control_type == "pose_transfer":
            src_image_iuv_array = self.densepose_predictor.predict_iuv(
                src_image_array)[:, :, ::-1]
            src_image_iuv = Image.fromarray(src_image_iuv_array)
            densepose = src_image_iuv
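
        # Note on the conditioning above: VITON-HD uses a DensePose
        # segmentation visualization (the [:, :, ::-1] slice appears to swap
        # BGR/RGB channel order), DressCode keeps only the part-index channel
        # of the IUV map replicated to three channels, and pose transfer uses
        # the full IUV map.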

        # Leffa inference; inputs are wrapped in single-element lists (a batch of one).
        transform = LeffaTransform()
        data = {
            "src_image": [src_image],
            "ref_image": [ref_image],
            "mask": [mask],
            "densepose": [densepose],
        }
        data = transform(data)
        if control_type == "virtual_tryon":
            if vt_model_type == "viton_hd":
                inference = self.vt_inference_hd
            elif vt_model_type == "dress_code":
                inference = self.vt_inference_dc
        elif control_type == "pose_transfer":
            inference = self.pt_inference
        output = inference(
            data,
            ref_acceleration=ref_acceleration,
            num_inference_steps=step,
            guidance_scale=scale,
            seed=seed,
            repaint=vt_repaint,
        )
        gen_image = output["generated_image"][0]
        return np.array(gen_image), np.array(mask), np.array(densepose)

    def dehasoft(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint):
        return self.leffa_predict(src_image_path, ref_image_path, "virtual_tryon", ref_acceleration, step, scale, seed, vt_model_type, vt_garment_type, vt_repaint)

    def leffa_predict_pt(self, src_image_path, ref_image_path, ref_acceleration, step, scale, seed):
        return self.leffa_predict(src_image_path, ref_image_path, "pose_transfer", ref_acceleration, step, scale, seed)
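

# Minimal programmatic usage sketch (illustrative only; "person.jpg" and
# "garment.jpg" are assumed example files, not assets shipped with the repo):
#
#   predictor = LeffaPredictor()
#   gen_array, mask_array, densepose_array = predictor.dehasoft(
#       "person.jpg", "garment.jpg",
#       ref_acceleration=False, step=30, scale=2.5, seed=42,
#       vt_model_type="viton_hd", vt_garment_type="upper_body", vt_repaint=False,
#   )
#   Image.fromarray(gen_array).save("tryon_result.png")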


if __name__ == "__main__":
    leffa_predictor = LeffaPredictor()

    example_dir = "./ckpts/examples"
    person1_images = list_dir(f"{example_dir}/person1")
    person2_images = list_dir(f"{example_dir}/person2")
    garment_images = list_dir(f"{example_dir}/garment")

    # Customized theme
    theme = gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="purple",
        neutral_hue="gray",
        radius_size="lg",
        text_size="lg",
        spacing_size="md",
    ).set(
        body_background_fill="#f5f5f5",
        background_fill_primary="#ffffff",
        button_primary_background_fill="#4f46e5",
        button_primary_background_fill_hover="#6b7280",
        button_primary_text_color="#ffffff",
        shadow_drop="0 4px 6px rgba(0, 0, 0, 0.1)",  # .set() exposes shadow_drop, not a bare `shadow` key
    )

    # Title and description
    title = "# Dehasoft AI Studio"
    description = """
Welcome to **Dehasoft AI Studio**! Transform appearances with virtual try-on or adjust poses with pose transfer using cutting-edge AI models.
Powered by models trained on the VITON-HD, DressCode, and DeepFashion datasets.
"""
    footer_note = """
**Note:** Models are trained on academic datasets only. Virtual try-on uses VITON-HD/DressCode, while pose transfer uses DeepFashion.
"""

    # Passing CSS to gr.Blocks(css=...) is the documented route; assigning
    # demo.css after the Blocks context has closed is not reliable across
    # Gradio versions.
    custom_css = """
    .title { text-align: center; font-size: 2.5em; margin-bottom: 10px; color: #4f46e5; }
    .description { text-align: center; font-size: 1.2em; margin-bottom: 20px; color: #374151; }
    .section-title { font-size: 1.5em; color: #6b7280; margin-bottom: 10px; }
    .image-upload, .image-output { border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); }
    .generate-btn { transition: all 0.3s ease; }
    .generate-btn:hover { transform: scale(1.05); }
    .generate-btn.loading { opacity: 0.7; cursor: not-allowed; }
    .accordion { background-color: #f9fafb; border-radius: 8px; }
    .radio, .checkbox, .slider, .number { margin: 5px 0; }
    .examples { margin-top: 10px; }
    .footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: #6b7280; }
    """

    # Click-time JS that toggles a loading style on the first .generate-btn
    # and returns its arguments unchanged so the Python fn still receives the
    # input values (assumes Gradio 4+, where the keyword is `js`, not `_js`).
    loading_js = (
        "(...args) => {"
        " const btn = document.querySelector('.generate-btn');"
        " btn.classList.add('loading');"
        " setTimeout(() => btn.classList.remove('loading'), 5000);"
        " return args; }"
    )

    with gr.Blocks(theme=theme, css=custom_css, title="Dehasoft AI Studio") as demo:
        gr.Markdown(title, elem_classes=["title"])
        gr.Markdown(description, elem_classes=["description"])

        with gr.Tabs(elem_classes=["tabs"]):
            with gr.TabItem("Virtual Try-On", elem_id="vt_tab"):
                with gr.Row(equal_height=True):
                    with gr.Column(scale=1):
                        gr.Markdown("### Upload Person Image", elem_classes=["section-title"])
                        vt_src_image = gr.Image(
                            sources=["upload"],
                            type="filepath",
                            label="Person Image",
                            interactive=True,
                            height=400,
                            elem_classes=["image-upload"],
                        )
                        gr.Examples(
                            examples=person1_images,
                            inputs=vt_src_image,
                            examples_per_page=5,
                            elem_classes=["examples"],
                        )
                    with gr.Column(scale=1):
                        gr.Markdown("### Upload Garment Image", elem_classes=["section-title"])
                        vt_ref_image = gr.Image(
                            sources=["upload"],
                            type="filepath",
                            label="Garment Image",
                            interactive=True,
                            height=400,
                            elem_classes=["image-upload"],
                        )
                        gr.Examples(
                            examples=garment_images,
                            inputs=vt_ref_image,
                            examples_per_page=5,
                            elem_classes=["examples"],
                        )
                    with gr.Column(scale=1):
                        gr.Markdown("### Result", elem_classes=["section-title"])
                        vt_gen_image = gr.Image(
                            label="Generated Image",
                            height=400,
                            elem_classes=["image-output"],
                        )
                        vt_gen_button = gr.Button(
                            "Generate Image",
                            variant="primary",
                            size="lg",
                            elem_classes=["generate-btn"],
                        )
                        with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]):
                            vt_model_type = gr.Radio(
                                label="Model Type",
                                choices=[("VITON-HD (Recommended)", "viton_hd"), ("DressCode (Experimental)", "dress_code")],
                                value="viton_hd",
                                elem_classes=["radio"],
                            )
                            vt_garment_type = gr.Radio(
                                label="Garment Type",
                                choices=[("Upper", "upper_body"), ("Lower", "lower_body"), ("Dress", "dresses")],
                                value="upper_body",
                                elem_classes=["radio"],
                            )
                            vt_ref_acceleration = gr.Checkbox(
                                label="Accelerate Reference UNet",
                                value=False,
                                elem_classes=["checkbox"],
                            )
                            vt_repaint = gr.Checkbox(
                                label="Repaint Mode",
                                value=False,
                                elem_classes=["checkbox"],
                            )
                            vt_step = gr.Slider(
                                label="Inference Steps",
                                minimum=30,
                                maximum=100,
                                step=1,
                                value=30,
                                elem_classes=["slider"],
                            )
                            vt_scale = gr.Slider(
                                label="Guidance Scale",
                                minimum=0.1,
                                maximum=5.0,
                                step=0.1,
                                value=2.5,
                                elem_classes=["slider"],
                            )
                            vt_seed = gr.Number(
                                label="Random Seed",
                                minimum=-1,
                                maximum=2147483647,
                                step=1,
                                value=42,
                                elem_classes=["number"],
                            )
                        with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]):
                            vt_mask = gr.Image(label="Generated Mask", height=200)
                            vt_densepose = gr.Image(label="Generated DensePose", height=200)

                vt_gen_button.click(
                    fn=leffa_predictor.dehasoft,
                    inputs=[vt_src_image, vt_ref_image, vt_ref_acceleration, vt_step, vt_scale, vt_seed, vt_model_type, vt_garment_type, vt_repaint],
                    outputs=[vt_gen_image, vt_mask, vt_densepose],
                    js=loading_js,
                )
with gr.TabItem("Pose Transfer", elem_id="pt_tab"): | |
with gr.Row(equal_height=True): | |
with gr.Column(scale=1): | |
gr.Markdown("### Source Person Image", elem_classes=["section-title"]) | |
pt_ref_image = gr.Image( | |
sources=["upload"], | |
type="filepath", | |
label="Person Image", | |
interactive=True, | |
height=400, | |
elem_classes=["image-upload"], | |
) | |
gr.Examples( | |
examples=person1_images, | |
inputs=pt_ref_image, | |
examples_per_page=5, | |
elem_classes=["examples"], | |
) | |
with gr.Column(scale=1): | |
gr.Markdown("### Target Pose Image", elem_classes=["section-title"]) | |
pt_src_image = gr.Image( | |
sources=["upload"], | |
type="filepath", | |
label="Target Pose Person Image", | |
interactive=True, | |
height=400, | |
elem_classes=["image-upload"], | |
) | |
gr.Examples( | |
examples=person2_images, | |
inputs=pt_src_image, | |
examples_per_page=5, | |
elem_classes=["examples"], | |
) | |
with gr.Column(scale=1): | |
gr.Markdown("### Result", elem_classes=["section-title"]) | |
pt_gen_image = gr.Image( | |
label="Generated Image", | |
height=400, | |
elem_classes=["image-output"], | |
) | |
pt_gen_button = gr.Button( | |
"Generate Image", | |
variant="primary", | |
size="lg", | |
elem_classes=["generate-btn"], | |
) | |
with gr.Accordion("Advanced Settings", open=False, elem_classes=["accordion"]): | |
pt_ref_acceleration = gr.Checkbox( | |
label="Accelerate Reference UNet", | |
value=False, | |
elem_classes=["checkbox"], | |
) | |
pt_step = gr.Slider( | |
label="Inference Steps", | |
minimum=30, | |
maximum=100, | |
step=1, | |
value=30, | |
elem_classes=["slider"], | |
) | |
pt_scale = gr.Slider( | |
label="Guidance Scale", | |
minimum=0.1, | |
maximum=5.0, | |
step=0.1, | |
value=2.5, | |
elem_classes=["slider"], | |
) | |
pt_seed = gr.Number( | |
label="Random Seed", | |
minimum=-1, | |
maximum=2147483647, | |
step=1, | |
value=42, | |
elem_classes=["number"], | |
) | |
with gr.Accordion("Debug Info", open=False, elem_classes=["accordion"]): | |
pt_mask = gr.Image(label="Generated Mask", height=200) | |
pt_densepose = gr.Image(label="Generated DensePose", height=200) | |

                pt_gen_button.click(
                    fn=leffa_predictor.leffa_predict_pt,
                    inputs=[pt_src_image, pt_ref_image, pt_ref_acceleration, pt_step, pt_scale, pt_seed],
                    outputs=[pt_gen_image, pt_mask, pt_densepose],
                    js=loading_js,
                )

        gr.Markdown(footer_note, elem_classes=["footer"])
demo.css = """ | |
.title { text-align: center; font-size: 2.5em; margin-bottom: 10px; color: #4f46e5; } | |
.description { text-align: center; font-size: 1.2em; margin-bottom: 20px; color: #374151; } | |
.section-title { font-size: 1.5em; color: #6b7280; margin-bottom: 10px; } | |
.image-upload, .image-output { border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); } | |
.generate-btn { transition: all 0.3s ease; } | |
.generate-btn:hover { transform: scale(1.05); } | |
.generate-btn.loading { opacity: 0.7; cursor: not-allowed; } | |
.accordion { background-color: #f9fafb; border-radius: 8px; } | |
.radio, .checkbox, .slider, .number { margin: 5px 0; } | |
.examples { margin-top: 10px; } | |
.footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: #6b7280; } | |
""" | |

    demo.launch(share=True, server_port=7860, allowed_paths=["./ckpts/examples"])