"""Gradio demo: image-to-image transformation with the SDXL Refiner model.

The user supplies a text prompt, a URL pointing at an initial image, and a
denoising ``strength``; the app returns the initial and generated images
side by side for comparison.
"""

import gradio as gr
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, make_image_grid
from PIL import Image
import requests
from io import BytesIO

# Load the img2img pipeline once at startup (fp16 weights to halve memory).
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)

# Offload submodules to CPU when idle to reduce GPU memory usage.
pipeline.enable_model_cpu_offload()


def load_init_image(url: str) -> Image.Image:
    """Download the initial image from *url* and return it as a PIL image.

    Raises:
        requests.HTTPError: if the server responds with an error status.
        requests.Timeout: if the download stalls (30-second limit).
    """
    # timeout: requests has NO default timeout — without one a dead host
    # would hang the Gradio worker indefinitely.
    response = requests.get(url, timeout=30)
    # Fail loudly on 404/500 instead of feeding an HTML error page to
    # Image.open (which would raise a confusing UnidentifiedImageError).
    response.raise_for_status()
    # Normalize to RGB: palette/RGBA/grayscale web images can break the
    # fp16 pipeline input.
    return Image.open(BytesIO(response.content)).convert("RGB")


def generate_image(prompt: str, image_url: str, strength: float) -> Image.Image:
    """Run the refiner pipeline and return init/result images side by side.

    Args:
        prompt: Text description guiding the transformation.
        image_url: URL of the initial image.
        strength: Denoising strength in [0, 1]; higher values deviate more
            from the initial image.

    Returns:
        A single PIL image containing the initial and generated images
        arranged in a 1x2 grid.
    """
    init_image = load_init_image(image_url)
    result_image = pipeline(prompt, image=init_image, strength=strength).images[0]
    # img2img preserves the input size, so both grid cells match
    # (make_image_grid requires equally sized images).
    return make_image_grid([init_image, result_image], rows=1, cols=2)


# Guarding launch() keeps the module importable (e.g. for testing) without
# starting a web server as a side effect.
if __name__ == "__main__":
    gr.Interface(
        fn=generate_image,
        inputs=[
            gr.Textbox(lines=1, label="Prompt", placeholder="Enter the image description prompt"),
            gr.Textbox(lines=1, label="Image URL", placeholder="Enter the URL of the initial image"),
            gr.Slider(0.0, 1.0, value=0.5, label="Strength"),
        ],
        outputs=gr.Image(label="Image Comparison"),
        title="Stable Diffusion XL Refiner - Image to Image",
        description="Generate an image transformation from an initial image and a text prompt using the Stable Diffusion XL Refiner model.",
    ).launch()