# PyTorch 2.8 (temporary hack)
import os
os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')

import math
import random

import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline, FlowMatchEulerDiscreteScheduler

from optimization import optimize_pipeline_

# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Scheduler config needed for the Lightning LoRA. From
# https://github.com/ModelTC/Qwen-Image-Lightning/blob/342260e8f5468d2f24d084ce04f55e101007118b/generate_with_diffusers.py#L82C9-L97C10
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),  # We use shift=3 in distillation
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),  # We use shift=3 in distillation
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,  # set shift_terminal to None
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}
scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)

# Load the model pipeline
pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", scheduler=scheduler, torch_dtype=dtype
).to(device)

# LoRA loading: load the 8-step Lightning weights, fuse them into the
# transformer, then drop the LoRA bookkeeping so inference runs on fused weights.
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-8steps-V1.0.safetensors",
    adapter_name="lightx2v",
)
pipe.set_adapters(["lightx2v"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=1.0, components=["transformer"])
pipe.unload_lora_weights()

# One-time optimization/warm-up pass over the pipeline with dummy inputs
optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")

# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max


# --- Main Inference Function (with hardcoded negative prompt) ---
@spaces.GPU(duration=120)
def infer(
    image,
    prompt,
    seed=42,
    randomize_seed=False,
    guidance_scale=4.0,
    true_guidance_scale=1.0,
    num_inference_steps=8,
    progress=gr.Progress(track_tqdm=True),
):
    """Generates an edited image using the local Qwen-Image-Edit diffusers pipeline."""
    # Hardcode the negative prompt as requested
    negative_prompt = "text, watermark, copyright, blurry, low resolution"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Set up the generator for reproducibility
    generator = torch.Generator(device=device).manual_seed(seed)

    print(f"Calling pipeline with prompt: '{prompt}'")
    print(f"Negative Prompt: '{negative_prompt}'")
    print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {guidance_scale}")

    # Generate the image
    image = pipe(
        image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        guidance_scale=guidance_scale,
    ).images[0]

    return image, seed


# --- Examples and UI Layout ---
examples = []

css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
#edit_text{margin-top: -62px !important}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML('Qwen-Image Logo')  # logo header
        gr.HTML('Edit', elem_id="edit_text")  # "Edit" overlay heading
        gr.Markdown(
            "[Learn more](https://github.com/QwenLM/Qwen-Image) about the "
            "Qwen-Image series. Try it on [Qwen Chat](https://chat.qwen.ai/), or "
            "[download the model](https://huggingface.co/Qwen/Qwen-Image-Edit) to "
            "run locally with ComfyUI or diffusers."
        )
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", show_label=False, type="pil")
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    placeholder="describe the edit instruction",
                    container=False,
                )
                run_button = gr.Button("Edit!", variant="primary")
            result = gr.Image(label="Result", show_label=False, type="pil")

        with gr.Accordion("Advanced Settings", open=False):
            # Negative prompt UI element is removed here
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Distilled guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=1.0,
                )
                true_guidance_scale = gr.Slider(
                    label="True guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=1.0,
                )
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=8,
            )

        # gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            input_image,
            prompt,
            # negative_prompt is no longer an input from the UI
            seed,
            randomize_seed,
            guidance_scale,
            true_guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()
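
# --- Sanity check for the shift=3 scheduler config (sketch) ---
# With use_dynamic_shifting=True and time_shift_type="exponential", the
# scheduler warps each sigma as exp(mu) / (exp(mu) + (1 / sigma - 1)), where mu
# is interpolated between base_shift and max_shift by image sequence length.
# Pinning both ends to math.log(3) keeps the effective shift at 3 at every
# resolution, matching the Lightning distillation. The helper below mirrors
# that formula for illustration; it is an assumption, not a diffusers API call.
#
#   def shifted_sigma(sigma: float, mu: float = math.log(3)) -> float:
#       return math.exp(mu) / (math.exp(mu) + (1 / sigma - 1))
#
#   print(shifted_sigma(0.5))  # 0.75, i.e. the same as 3 * sigma / (1 + 2 * sigma)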
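
# --- Standalone usage (sketch) ---
# A minimal sketch of driving the pipeline without the Gradio UI, assuming the
# model/LoRA setup above has already run in the same process. The file names
# and the edit instruction below are illustrative, not part of the app.
#
#   source = Image.open("input.png").convert("RGB")
#   edited, used_seed = infer(
#       source,
#       "turn the car red",
#       seed=0,
#       randomize_seed=True,
#       true_guidance_scale=1.0,
#       num_inference_steps=8,
#   )
#   edited.save("output.png")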