# import gradio as gr
# import torch
# from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
# from PIL import Image
# import base64
# from io import BytesIO

# # You can change these:
# BASE_MODEL = "runwayml/stable-diffusion-v1-5"
# CONTROLNET_ID = "lllyasviel/sd-controlnet-canny"  # placeholder; change to a QR-focused ControlNet if you have one

# device = "cuda" if torch.cuda.is_available() else "cpu"

# controlnet = ControlNetModel.from_pretrained(
#     CONTROLNET_ID, torch_dtype=torch.float16 if device=="cuda" else torch.float32
# )

# pipe = StableDiffusionControlNetPipeline.from_pretrained(
#     BASE_MODEL,
#     controlnet=controlnet,
#     torch_dtype=torch.float16 if device=="cuda" else torch.float32,
#     safety_checker=None
# )
# pipe.to(device)

# def generate(prompt, control_image, guidance_scale=7.5, steps=30, seed=0):
#     print("API called:", type(control_image))
#     generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
#     img = pipe(
#         prompt=prompt,
#         image=control_image,
#         num_inference_steps=int(steps),
#         guidance_scale=float(guidance_scale),
#         generator=generator
#     ).images[0]
#     return img

# with gr.Blocks() as demo:
#     gr.Markdown("# ControlNet Image Generator")
#     with gr.Row():
#         prompt = gr.Textbox(label="Prompt", value="A futuristic poster, high detail")
#         seed = gr.Number(label="Seed (0=random)", value=0)
#     with gr.Row():
#         control = gr.Image(type="pil", label="Control image (e.g., QR or edge map)")
#         steps = gr.Slider(10, 50, 30, step=1, label="Steps")
#         guidance = gr.Slider(1.0, 12.0, 7.5, step=0.1, label="Guidance scale")
#     out = gr.Image(label="Result")

#     btn = gr.Button("Generate")
#     # api_name gives the event a stable, named API endpoint
#     # (gr.Examples does not enable the API, and an empty examples list errors)
#     btn.click(generate, [prompt, control, guidance, steps, seed], out, api_name="predict")

# demo.launch()

# app.py  — minimal, API-stable echo server + Gradio UI
import base64, io
from typing import List, Any
from PIL import Image
import gradio as gr
from fastapi import FastAPI
from pydantic import BaseModel

# ---- helpers ----
def data_url_to_pil(data_url: str) -> Image.Image:
    # expects "data:image/<ext>;base64,<...>"
    b64 = data_url.split(",", 1)[1]
    return Image.open(io.BytesIO(base64.b64decode(b64))).convert("RGB")

def pil_to_data_url(img: Image.Image) -> str:
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

# ---- your model fn (for now just echo control image) ----
def generate(prompt: str, control_image: Image.Image, guidance: float, steps: int, seed: int):
    # TODO: replace with ControlNet pipeline call
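    # A sketch of what that call could look like, mirroring the commented-out
    # pipeline above (assumes `pipe` and `device` are set up as in that block;
    # not wired in here):
    #
    #   generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
    #   return pipe(
    #       prompt=prompt,
    #       image=control_image,
    #       num_inference_steps=int(steps),
    #       guidance_scale=float(guidance),
    #       generator=generator,
    #   ).images[0]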
    return control_image

# ---- Gradio UI (not required for API, but nice to see) ----
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt", value="High-quality artistic style, preserve structure"),
        gr.Image(type="pil", label="Control Image (PNG/JPG)"),
        gr.Slider(1, 12, 7.5, step=0.1, label="Guidance scale"),
        gr.Slider(10, 50, 30, step=1, label="Steps"),
        gr.Number(0, label="Seed"),
    ],
    outputs=gr.Image(label="Result"),
)
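
# (demo.launch() would serve this UI on its own; instead it is mounted onto
# the FastAPI app below, so the /api/predict/0 route stays stable.)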

# ---- FastAPI endpoint (stable path for Postman/Next.js) ----
app = FastAPI()

class PredictIn(BaseModel):
    data: List[Any]  # [prompt, controlImageBase64, guidance, steps, seed]

@app.post("/api/predict/0")
def predict(payload: PredictIn):
    prompt, control_b64, guidance, steps, seed = payload.data
    img = data_url_to_pil(control_b64) if isinstance(control_b64, str) else control_b64
    out = generate(str(prompt), img, float(guidance), int(steps), int(seed))
    return { "data": [ pil_to_data_url(out) ] }

# mount Gradio at "/"
app = gr.mount_gradio_app(app, demo, path="/")
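
# Run with, e.g.:
#   uvicorn app:app --host 0.0.0.0 --port 7860
# (7860 is just Gradio's conventional port; any free port works)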