import os
import sys
import subprocess
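
# deepspeed is uninstalled up front; assumption: the version preinstalled in
# this Space image conflicts with the blip3o/transformers stack imported below.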
subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "-y", "deepspeed"])

import random

# NOTE: `spaces` is imported before torch so ZeroGPU can hook CUDA initialization.
import spaces
import numpy as np
import torch
from PIL import Image
import gradio as gr
from diffusers import DiffusionPipeline

from blip3o.conversation import conv_templates
from blip3o.model.builder import load_pretrained_model
from blip3o.utils import disable_torch_init
from blip3o.mm_utils import get_model_name_from_path
from qwen_vl_utils import process_vision_info
from huggingface_hub import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
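
# The Qwen2.5-VL processor provides chat templating and image preprocessing
# for the image-understanding tab.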
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")

MAX_SEED = 10000

# Download the full BLIP3o checkpoint; the diffusion decoder lives in a subfolder.
HUB_MODEL_ID = "BLIP3o/BLIP3o-Model-8B"
model_snapshot_path = snapshot_download(repo_id=HUB_MODEL_ID)
diffusion_path = os.path.join(model_snapshot_path, "diffusion-decoder")
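
# Seed every RNG the generation path touches so a fixed seed reproduces outputs.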
def set_global_seed(seed: int = 42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
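
# Wrap raw text in the blip3o `qwen` conversation template; the decoder expects
# a fully formatted chat prompt rather than a bare caption.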
def add_template(prompt_list: list[str]) -> str:
    conv = conv_templates['qwen'].copy()
    conv.append_message(conv.roles[0], prompt_list[0])
    conv.append_message(conv.roles[1], None)
    return conv.get_prompt()

def make_prompt(text: str) -> list[str]:
    raw = f"Please generate image based on the following caption: {text}"
    return [add_template([raw])]

def randomize_seed_fn(seed: int, randomize: bool) -> int:
    return random.randint(0, MAX_SEED) if randomize else seed
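
# ZeroGPU attaches a GPU only for the duration of a @spaces.GPU-decorated call.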
@spaces.GPU
def generate_image(prompt: str, final_seed: int, guidance_scale: float, images_to_generate: int, progress: gr.Progress = gr.Progress(track_tqdm=True)) -> list[Image.Image]:
    set_global_seed(final_seed)
    formatted = make_prompt(prompt)
    images = []
    for _ in range(images_to_generate):
        # The custom pipeline exposes its result as a single PIL image via
        # `.image` (not the stock `.images` list of standard diffusers pipelines).
        out = pipe(formatted, guidance_scale=guidance_scale)
        images.append(out.image)
    return images
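
# Understanding path: build a Qwen chat message around the image, preprocess,
# generate, then decode only the newly produced tokens.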
@spaces.GPU
def process_image(prompt: str, img: Image.Image, progress: gr.Progress = gr.Progress(track_tqdm=True)) -> str:
    messages = [{
        "role": "user",
        "content": [
            {"type": "image", "image": img},
            {"type": "text", "text": prompt},
        ],
    }]

    text_prompt_for_qwen = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text_prompt_for_qwen],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to('cuda')
    generated_ids = multi_model.generate(**inputs, max_new_tokens=1024)
    # Strip the prompt tokens so only the model's answer is decoded.
    input_token_len = inputs.input_ids.shape[1]
    generated_ids_trimmed = generated_ids[:, input_token_len:]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True,
        clean_up_tokenization_spaces=False
    )[0]
    return output_text
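
# Load the shared multimodal LM once at startup; it answers the understanding
# tab directly and conditions the diffusion decoder below.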
print("Diffusion path:", diffusion_path)

disable_torch_init()

tokenizer, multi_model, _ = load_pretrained_model(
    model_snapshot_path, None, get_model_name_from_path(model_snapshot_path)
)
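
# The diffusion decoder ships with a custom pipeline ("pipeline_llava_gen");
# the multimodal LM above is passed in as its prompt encoder.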
pipe = DiffusionPipeline.from_pretrained(
    diffusion_path,
    custom_pipeline="pipeline_llava_gen",
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    variant="bf16",
    multimodal_encoder=multi_model,
    tokenizer=tokenizer,
    safety_checker=None,
)
# Move the denoising components (VAE + UNet) onto the GPU.
pipe.vae.to('cuda')
pipe.unet.to('cuda')
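
# Gradio UI: one tab per direction (text → image, image → text), sharing a
# single gallery/textbox output pair in the right-hand column.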
with gr.Blocks(title="BLIP3-o") as demo:
    gr.Markdown('''# BLIP3-o
A fully open-source unified model for both image understanding and generation. Check out our GitHub: https://github.com/JiuhaiChen/BLIP3o and paper: https://arxiv.org/abs/2505.09568
''')

    with gr.Row():
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Text → Image (Image Generation)"):
                    prompt_gen_input = gr.Textbox(
                        label="Prompt",
                        placeholder="Describe the image you want...",
                        lines=1
                    )
                    seed_slider = gr.Slider(
                        label="Seed",
                        minimum=0, maximum=int(MAX_SEED),
                        step=1, value=42
                    )
                    randomize_checkbox = gr.Checkbox(
                        label="Randomize seed", value=False
                    )
                    guidance_slider = gr.Slider(
                        label="Guidance Scale",
                        minimum=1.0, maximum=30.0,
                        step=0.5, value=3.0
                    )
                    images_to_generate = gr.Slider(
                        label="Number of images",
                        minimum=1, maximum=4,
                        step=1, value=4
                    )
                    run_image_gen_btn = gr.Button("Generate Image")

                    text_gen_examples_data = [
                        ["A cute cat."],
                        ["A young woman with freckles wearing a straw hat, standing in a golden wheat field."],
                        ["A group of friends having a picnic in the park."]
                    ]
                    gr.Examples(
                        examples=text_gen_examples_data,
                        inputs=[prompt_gen_input],
                        cache_examples=False,
                        label="Image Generation Examples"
                    )

                with gr.TabItem("Image → Text (Image Understanding)"):
                    image_understand_input = gr.Image(label="Input Image", type="pil")
                    prompt_understand_input = gr.Textbox(
                        label="Question about image",
                        placeholder="Describe what you want to know about the image (e.g., What is in this image?)",
                        lines=1
                    )
                    run_image_understand_btn = gr.Button("Understand Image")

                    image_understanding_examples_data = [
                        ["animal-compare.png", "Are these two pictures showing the same kind of animal?"],
                        ["funny_image.jpeg", "Why is this image funny?"],
                        ["animal-compare.png", "Describe this image in detail."],
                    ]
                    gr.Examples(
                        examples=image_understanding_examples_data,
                        inputs=[image_understand_input, prompt_understand_input],
                        cache_examples=False,
                        label="Image Understanding Examples"
                    )

            clean_btn = gr.Button("Clear All Inputs/Outputs")

        with gr.Column():
            output_gallery = gr.Gallery(label="Generated Images", columns=2, visible=True)
            output_text = gr.Textbox(label="Generated Text", visible=False, lines=5, interactive=False)
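
    # Each tab handler returns updates for both outputs, toggling visibility
    # between the gallery (generation) and the textbox (understanding).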
    @spaces.GPU
    def run_generate_image_tab(prompt, seed, guidance, num_images, progress=gr.Progress(track_tqdm=True)):
        imgs = generate_image(prompt, seed, guidance, num_images, progress=progress)
        return (
            gr.update(value=imgs, visible=True),
            gr.update(value="", visible=False)
        )

    @spaces.GPU
    def run_process_image_tab(img, prompt, progress=gr.Progress(track_tqdm=True)):
        if img is None:
            return (
                gr.update(value=[], visible=False),
                gr.update(value="Please upload an image for understanding.", visible=True)
            )
        txt = process_image(prompt, img, progress=progress)
        return (
            gr.update(value=[], visible=False),
            gr.update(value=txt, visible=True)
        )

    def clean_all_fn():
        return (
            # Generation tab inputs.
            gr.update(value=""),
            gr.update(value=42),
            gr.update(value=False),
            gr.update(value=3.0),
            # Understanding tab inputs.
            gr.update(value=None),
            gr.update(value=""),
            # Shared outputs.
            gr.update(value=[], visible=True),
            gr.update(value="", visible=False)
        )
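
    # Wiring: Generate first resolves the seed (randomized if requested), then
    # chains into image generation via .then().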
    gen_inputs = [prompt_gen_input, seed_slider, guidance_slider, images_to_generate]

    run_image_gen_btn.click(
        fn=randomize_seed_fn,
        inputs=[seed_slider, randomize_checkbox],
        outputs=[seed_slider]
    ).then(
        fn=run_generate_image_tab,
        inputs=gen_inputs,
        outputs=[output_gallery, output_text]
    )

    prompt_gen_input.submit(
        fn=randomize_seed_fn,
        inputs=[seed_slider, randomize_checkbox],
        outputs=[seed_slider]
    ).then(
        fn=run_generate_image_tab,
        inputs=gen_inputs,
        outputs=[output_gallery, output_text]
    )

    understand_inputs = [image_understand_input, prompt_understand_input]

    run_image_understand_btn.click(
        fn=run_process_image_tab,
        inputs=understand_inputs,
        outputs=[output_gallery, output_text]
    )

    prompt_understand_input.submit(
        fn=run_process_image_tab,
        inputs=understand_inputs,
        outputs=[output_gallery, output_text]
    )

    clean_btn.click(
        fn=clean_all_fn,
        inputs=[],
        outputs=[
            prompt_gen_input, seed_slider, randomize_checkbox, guidance_slider,
            image_understand_input, prompt_understand_input,
            output_gallery, output_text
        ]
    )
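
# `share=True` has no effect on Hugging Face Spaces but yields a public link
# when the script is run locally.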
if __name__ == "__main__":
    demo.launch(share=True)