import gradio as gr
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from PIL import Image
# You can change these:
BASE_MODEL = "runwayml/stable-diffusion-v1-5"
CONTROLNET_ID = "lllyasviel/sd-controlnet-canny" # placeholder; change to a QR-focused ControlNet if you have one
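# For QR-code art, a QR-oriented checkpoint (e.g. "monster-labs/control_v1p_sd15_qrcode_monster"
# on the Hub; availability assumed, not verified here) can be dropped in instead.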
device = "cuda" if torch.cuda.is_available() else "cpu"
controlnet = ControlNetModel.from_pretrained(
    CONTROLNET_ID,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    BASE_MODEL,
    controlnet=controlnet,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    safety_checker=None,
)
pipe.to(device)
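# Optional low-VRAM tweaks (assumption: GPU memory is the bottleneck; uncomment as needed):
# pipe.enable_attention_slicing()        # slices attention computation to cut peak memory
# pipe.enable_model_cpu_offload()        # needs `accelerate`; call this *instead of* pipe.to(device)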
def generate(prompt, control_image, guidance_scale=7.5, steps=30, seed=0):
    # seed == 0 leaves the generator unset, so results are non-deterministic
    generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
    img = pipe(
        prompt=prompt,
        image=control_image,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance_scale),
        generator=generator,
    ).images[0]
    return img
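# A minimal sketch (assumption: opencv-python and numpy are installed; this helper is
# not wired into the UI) of how a Canny edge map could be built as the control image:
def make_canny_control(image: Image.Image, low_threshold: int = 100, high_threshold: int = 200) -> Image.Image:
    import cv2
    import numpy as np
    # Edge-detect on a grayscale copy, then return a 3-channel image as the pipeline expects
    edges = cv2.Canny(np.array(image.convert("L")), low_threshold, high_threshold)
    return Image.fromarray(edges).convert("RGB")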
with gr.Blocks() as demo:
    gr.Markdown("# ControlNet Image Generator")
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", value="A futuristic poster, high detail")
        seed = gr.Number(label="Seed (0 = random)", value=0)
    with gr.Row():
        control = gr.Image(type="pil", label="Control image (e.g., QR or edge map)")
        steps = gr.Slider(10, 50, 30, step=1, label="Steps")
        guidance = gr.Slider(1.0, 12.0, 7.5, step=0.1, label="Guidance scale")
    out = gr.Image(label="Result")
    btn = gr.Button("Generate")
    btn.click(generate, [prompt, control, guidance, steps, seed], out)
    # Placeholder for example inputs (currently empty)
    gr.Examples([], inputs=[prompt, control, guidance, steps, seed], outputs=out)
demo.launch()
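# Usage note (assumption: run locally with `python app.py`): Gradio serves the UI at
# http://127.0.0.1:7860 by default; pass share=True to demo.launch() for a temporary public link.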