import gradio as gr
import os
import random
from PIL import Image
import spaces
import torch
from transformers import MllamaForConditionalGeneration, AutoProcessor
from OmniGen import OmniGenPipeline
from huggingface_hub import login
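# Authenticate with the Hugging Face Hub so the gated Llama-3.2 vision checkpoint can be
# downloaded; the token is read from the Space secret / environment variable "Llama32V".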
Llama32V_HFtoken = os.getenv("Llama32V")
login(Llama32V_HFtoken)
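# Two models are loaded:
#   - OmniGen (Shitao/OmniGen-v1): the unified image-generation pipeline that performs the try-on.
#   - Llama-3.2-11B-Vision-Instruct: used only to name the uploaded garments for the OmniGen prompt.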
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)
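# predict_clothing: asks the vision model to name each clothing image in 1-3 words
# (e.g. "denim jacket"); @spaces.GPU() requests a ZeroGPU slot for the duration of the call.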
@spaces.GPU()
def predict_clothing(images):
messages = [{"role": "user", "content":
[
{"type": "image"},
{"type": "text", "text": "Define this clothing in 1-3 words. Your response should be only the definition."}
]}
]
input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
output_texts = []
for image in images:
inputs = processor(image, input_text, add_special_tokens=False, return_tensors="pt").to(model.device)
with torch.no_grad():
output = model.generate(**inputs, max_new_tokens=30)
output_texts.append(str(processor.decode(output[0])))
return output_texts
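# generate_image: builds an OmniGen prompt from the person image and the detected garment
# names, then runs the pipeline; duration=180 asks ZeroGPU for a 3-minute allocation.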
@spaces.GPU(duration=180)
def generate_image(img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model,
use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=3):
    input_images = [img1, img2, img3]
    # Drop any image slots the user left empty.
    input_images = [img for img in input_images if img is not None]
    if len(input_images) < 2:
        raise gr.Error("Please upload the 'Person' image and at least one clothing image.")
    # Name the clothing images (everything after the person image) with the vision model.
    wears = predict_clothing(input_images[1:])
    if len(wears) == 1:
        dress = wears[0]
        text = f"A male wearing a {dress}. The male is in <img><|image_1|></img>. The {dress} is in <img><|image_2|></img>."
    elif len(wears) == 2:
        topwear, bottomwear = wears[0], wears[1]
        text = (f"A male wearing a {topwear} and a {bottomwear}. The male is in <img><|image_1|></img>. "
                f"The {topwear} is in <img><|image_2|></img>. The {bottomwear} is in <img><|image_3|></img>.")
    else:
        raise gr.Error("Could not identify the uploaded clothing images.")
    if randomize_seed:
        seed = random.randint(0, 10000000)
    output = pipe(prompt=text, input_images=input_images, height=height, width=width, guidance_scale=guidance_scale,
                  img_guidance_scale=img_guidance_scale, num_inference_steps=inference_steps, separate_cfg_infer=separate_cfg_infer,
                  use_kv_cache=True, offload_kv_cache=True, offload_model=offload_model,
                  use_input_image_size_as_output=use_input_image_size_as_output, seed=seed, max_input_image_size=max_input_image_size)
    img = output[0]
    return img
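# Illustrative example of the prompt built above for two garments (garment names vary with
# predict_clothing's output):
#   "A male wearing a t-shirt and a jeans. The male is in <img><|image_1|></img>.
#    The t-shirt is in <img><|image_2|></img>. The jeans is in <img><|image_3|></img>."
# OmniGen resolves <|image_k|> to the k-th entry of input_images.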
example_text = ""  # unused placeholder
def get_example():
    # One bundled example; the values follow the order of the gr.Examples inputs below.
    case = [
        [
            "./imgs/test_cases/icl1.jpg",  # person
            "./imgs/test_cases/icl2.jpg",  # top-wear
            "./imgs/test_cases/icl3.jpg",  # bottom-wear
            224,    # height
            224,    # width
            1.6,    # img_guidance_scale
            1,      # seed
            768,    # max_input_image_size
            False,  # randomize_seed
            False,  # use_input_image_size_as_output
            2.5,    # guidance_scale
        ],
    ]
    return case
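# run_for_examples: wrapper used by gr.Examples below; it pins inference_steps,
# separate_cfg_infer and offload_model so the bundled example runs with a fixed configuration.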
def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed, max_input_image_size, randomize_seed,
                     use_input_image_size_as_output, guidance_scale=3):
    # Fixed settings for the bundled example.
    inference_steps = 50
    separate_cfg_infer = True
    offload_model = False
    return generate_image(img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed,
                          separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size,
                          randomize_seed, guidance_scale)
description = """
This is a Virtual Try-On Platform.
Usage:
- First upload your own image as the first image, also tagged 'Person'
- Then upload you 'Top-wear' and 'Bottom-wear' images
- If its a single dress, and/or you don't have a Topwear and Bottomwear as separate images upload that single image under 'Topwear'
Tips:
- For image editing task and controlnet task, we recommend setting the height and width of output image as the same as input image. For example, if you want to edit a 512x512 image, you should set the height and width of output image as 512x512. You also can set the `use_input_image_size_as_output` to automatically set the height and width of output image as the same as input image.
- For out-of-memory or time cost, you can set `offload_model=True` or refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources) to select a appropriate setting.
- If inference time is too long when inputting multiple images, please try to reduce the `max_input_image_size`. For more details please refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources).
**HF Spaces often encounter errors due to quota limitations, so recommend to run it locally.**
"""
Credits = """**Credits**
Made using [OmniGen](https://huggingface.co/Shitao/OmniGen-v1): Unified Image Generation [paper](https://arxiv.org/abs/2409.11340) [code](https://github.com/VectorSpaceLab/OmniGen)
"""
# Gradio
with gr.Blocks() as demo:
gr.Markdown("Virtual Try-On")
gr.Markdown(description)
with gr.Row():
with gr.Row(equal_height=True):
# input images
image_input_1 = gr.Image(label="Person", type="filepath")
image_input_2 = gr.Image(label="Top-wear", type="filepath")
image_input_3 = gr.Image(label="Bottom-wear", type="filepath")
# slider
height_input = gr.Slider(
label="Height", minimum=128, maximum=2048, value=1024, step=16
)
width_input = gr.Slider(
label="Width", minimum=128, maximum=2048, value=1024, step=16
)
guidance_scale_input = gr.Slider(
label="Guidance Scale", minimum=1.0, maximum=5.0, value=2.5, step=0.1
)
img_guidance_scale_input = gr.Slider(
label="img_guidance_scale", minimum=1.0, maximum=2.0, value=1.6, step=0.1
)
num_inference_steps = gr.Slider(
label="Inference Steps", minimum=1, maximum=100, value=50, step=1
)
seed_input = gr.Slider(
label="Seed", minimum=0, maximum=2147483647, value=42, step=1
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
max_input_image_size = gr.Slider(
label="max_input_image_size", minimum=128, maximum=2048, value=1024, step=16
)
            separate_cfg_infer = gr.Checkbox(
                label="separate_cfg_infer", info="Use a separate inference pass for each guidance branch. This reduces memory cost.", value=True,
            )
            offload_model = gr.Checkbox(
                label="offload_model", info="Offload the model to CPU. This significantly reduces memory cost but slows down generation. You can disable separate_cfg_infer and enable offload_model instead; enabling both reduces memory further but is the slowest option.", value=False,
            )
            use_input_image_size_as_output = gr.Checkbox(
                label="use_input_image_size_as_output", info="Automatically set the output image size to match the input image. For editing and ControlNet tasks this keeps output and input the same size, which usually gives better results.", value=False,
            )
# generate
generate_button = gr.Button("Generate Image")
with gr.Row():
# output image
output_image = gr.Image(label="Output Image")
# click
generate_button.click(
generate_image,
inputs=[image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, num_inference_steps,
seed_input, separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed,
guidance_scale_input,],
outputs=output_image,
)
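    # Bundled example: clicking it feeds the values from get_example() into run_for_examples.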
gr.Examples(
examples=get_example(),
fn=run_for_examples,
inputs=[image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, seed_input,
max_input_image_size, randomize_seed, use_input_image_size_as_output,guidance_scale_input],
outputs=output_image,
)
gr.Markdown(Credits)
# launch
demo.launch()