# Previous version (full ControlNet pipeline), kept commented out for reference:
#
# import gradio as gr
# import torch
# from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
# from PIL import Image
# import base64
# from io import BytesIO
#
# # You can change these:
# BASE_MODEL = "runwayml/stable-diffusion-v1-5"
# CONTROLNET_ID = "lllyasviel/sd-controlnet-canny"  # placeholder; change to a QR-focused ControlNet if you have one
#
# device = "cuda" if torch.cuda.is_available() else "cpu"
#
# controlnet = ControlNetModel.from_pretrained(
#     CONTROLNET_ID, torch_dtype=torch.float16 if device == "cuda" else torch.float32
# )
# pipe = StableDiffusionControlNetPipeline.from_pretrained(
#     BASE_MODEL,
#     controlnet=controlnet,
#     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
#     safety_checker=None,
# )
# pipe.to(device)
#
# def generate(prompt, control_image, guidance_scale=7.5, steps=30, seed=0):
#     print("API called:", type(control_image))
#     generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
#     img = pipe(
#         prompt=prompt,
#         image=control_image,
#         num_inference_steps=int(steps),
#         guidance_scale=float(guidance_scale),
#         generator=generator,
#     ).images[0]
#     return img
#
# with gr.Blocks() as demo:
#     gr.Markdown("# ControlNet Image Generator")
#     with gr.Row():
#         prompt = gr.Textbox(label="Prompt", value="A futuristic poster, high detail")
#         seed = gr.Number(label="Seed (0=random)", value=0)
#     with gr.Row():
#         control = gr.Image(type="pil", label="Control image (e.g., QR or edge map)")
#         steps = gr.Slider(10, 50, 30, step=1, label="Steps")
#         guidance = gr.Slider(1.0, 12.0, 7.5, step=0.1, label="Guidance scale")
#     out = gr.Image(label="Result")
#     btn = gr.Button("Generate")
#     btn.click(generate, [prompt, control, guidance, steps, seed], out)
#
#     # Enable simple API use
#     gr.Examples([], inputs=[prompt, control, guidance, steps, seed], outputs=out)
#
# demo.launch()

# app.py - minimal, API-stable echo server + Gradio UI
import base64, io
from typing import Any, List

import gradio as gr
from fastapi import FastAPI
from PIL import Image
from pydantic import BaseModel

# ---- helpers ----
def data_url_to_pil(data_url: str) -> Image.Image:
    # expects "data:image/<ext>;base64,<...>"
    b64 = data_url.split(",", 1)[1]
    return Image.open(io.BytesIO(base64.b64decode(b64))).convert("RGB")

def pil_to_data_url(img: Image.Image) -> str:
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

# ---- your model fn (for now it just echoes the control image) ----
def generate(prompt: str, control_image: Image.Image, guidance: float, steps: int, seed: int):
    # TODO: replace with the ControlNet pipeline call (see the commented version above)
    return control_image

# ---- Gradio UI (not required for the API, but nice to see) ----
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt", value="High-quality artistic style, preserve structure"),
        gr.Image(type="pil", label="Control Image (PNG/JPG)"),
        gr.Slider(1, 12, 7.5, step=0.1, label="Guidance scale"),
        gr.Slider(10, 50, 30, step=1, label="Steps"),
        gr.Number(0, label="Seed"),
    ],
    outputs=gr.Image(label="Result"),
)

# ---- FastAPI endpoint (stable path for Postman/Next.js) ----
app = FastAPI()

class PredictIn(BaseModel):
    data: List[Any]  # [prompt, controlImageBase64, guidance, steps, seed]

@app.post("/api/predict")  # route decorator was missing; path chosen here, adjust to taste
def predict(payload: PredictIn):
    prompt, control_b64, guidance, steps, seed = payload.data
    img = data_url_to_pil(control_b64) if isinstance(control_b64, str) else control_b64
    out = generate(str(prompt), img, float(guidance), int(steps), int(seed))
    return {"data": [pil_to_data_url(out)]}

# mount Gradio at "/" (registered after the API route, so /api/predict keeps precedence)
app = gr.mount_gradio_app(app, demo, path="/")
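
# Entry point: a minimal sketch assuming the file is run directly with
# `python app.py` (locally or in a Docker Space); 7860 is the conventional
# Spaces port. Hosts that import `app` and run their own server skip this.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)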