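# Gradio demo: SDXL image-to-image generation with RealVisXL V4.0 via the diffusers library.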
import gradio as gr
import torch
from diffusers import AutoPipelineForImage2Image, DPMSolverMultistepScheduler

base_model = "SG161222/RealVisXL_V4.0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the SDXL image-to-image pipeline; fp16 halves memory on GPU, fall back to fp32 on CPU
pipeline = AutoPipelineForImage2Image.from_pretrained(
    base_model,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    use_safetensors=True,
)
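# Swap in the DPM-Solver++ multistep scheduler, which produces good results in relatively few steps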
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline.to(device)
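# Fixed-seed generator so runs start from a reproducible state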
generator = torch.Generator(device).manual_seed(0)

def image_to_image(input_image, prompt, guidance_scale, num_inference_steps):
    """Run the img2img pipeline on the uploaded image and return the generated result."""
    output_image = pipeline(
        generator=generator,
        prompt=prompt,
        image=input_image,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]

    return output_image

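# Build the Gradio UI: image and prompt inputs, sampling controls, and the generated output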
with gr.Blocks() as grApp:
    input_image = gr.Image(label="Input Image")
    prompt = gr.Textbox(lines=3, label="Prompt")
    # Typical CFG values for SDXL are roughly 5-9; values <= 1 effectively disable guidance
    guidance_scale = gr.Slider(minimum=1, maximum=15, value=7.5, label="Guidance Scale")
    num_inference_steps = gr.Slider(minimum=10, maximum=100, value=25, label="Number of Inference Steps")
    output_image = gr.Image(label="Output Image")
    generate_btn = gr.Button("Generate Image")
    generate_btn.click(
        fn=image_to_image,
        inputs=[input_image, prompt, guidance_scale, num_inference_steps],
        outputs=output_image,
    )

grApp.launch()
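# Optionally, grApp.queue().launch(share=True) serializes GPU requests and exposes a public link.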