Tanut committed on
Commit
0aeab2f
·
1 Parent(s): e383bbb

Checking URL

Files changed (1)
  1. app.py +71 -48
app.py CHANGED
@@ -1,54 +1,77 @@
 import gradio as gr
-import torch
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 from PIL import Image
-import base64
-from io import BytesIO
-
-# You can change these:
-BASE_MODEL = "runwayml/stable-diffusion-v1-5"
-CONTROLNET_ID = "lllyasviel/sd-controlnet-canny"  # placeholder; change to a QR-focused ControlNet if you have one
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-controlnet = ControlNetModel.from_pretrained(
-    CONTROLNET_ID, torch_dtype=torch.float16 if device=="cuda" else torch.float32
 )
 
-pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    BASE_MODEL,
-    controlnet=controlnet,
-    torch_dtype=torch.float16 if device=="cuda" else torch.float32,
-    safety_checker=None
-)
-pipe.to(device)
-
-def generate(prompt, control_image, guidance_scale=7.5, steps=30, seed=0):
-    generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
-    img = pipe(
-        prompt=prompt,
-        image=control_image,
-        num_inference_steps=int(steps),
-        guidance_scale=float(guidance_scale),
-        generator=generator
-    ).images[0]
-    return img
-
-with gr.Blocks() as demo:
-    gr.Markdown("# ControlNet Image Generator")
-    with gr.Row():
-        prompt = gr.Textbox(label="Prompt", value="A futuristic poster, high detail")
-        seed = gr.Number(label="Seed (0=random)", value=0)
-    with gr.Row():
-        control = gr.Image(type="pil", label="Control image (e.g., QR or edge map)")
-        steps = gr.Slider(10, 50, 30, step=1, label="Steps")
-        guidance = gr.Slider(1.0, 12.0, 7.5, step=0.1, label="Guidance scale")
-    out = gr.Image(label="Result")
-
-    btn = gr.Button("Generate")
-    btn.click(generate, [prompt, control, guidance, steps, seed], out)
-
-    # Enable simple API use
-    gr.Examples([], inputs=[prompt, control, guidance, steps, seed], outputs=out)
-
-demo.launch()
+# import gradio as gr
+# import torch
+# from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+# from PIL import Image
+# import base64
+# from io import BytesIO
+
+# # You can change these:
+# BASE_MODEL = "runwayml/stable-diffusion-v1-5"
+# CONTROLNET_ID = "lllyasviel/sd-controlnet-canny"  # placeholder; change to a QR-focused ControlNet if you have one
+
+# device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# controlnet = ControlNetModel.from_pretrained(
+#     CONTROLNET_ID, torch_dtype=torch.float16 if device=="cuda" else torch.float32
+# )
+
+# pipe = StableDiffusionControlNetPipeline.from_pretrained(
+#     BASE_MODEL,
+#     controlnet=controlnet,
+#     torch_dtype=torch.float16 if device=="cuda" else torch.float32,
+#     safety_checker=None
+# )
+# pipe.to(device)
+
+# def generate(prompt, control_image, guidance_scale=7.5, steps=30, seed=0):
+#     generator = torch.Generator(device=device).manual_seed(int(seed)) if seed else None
+#     img = pipe(
+#         prompt=prompt,
+#         image=control_image,
+#         num_inference_steps=int(steps),
+#         guidance_scale=float(guidance_scale),
+#         generator=generator
+#     ).images[0]
+#     return img
+
+# with gr.Blocks() as demo:
+#     gr.Markdown("# ControlNet Image Generator")
+#     with gr.Row():
+#         prompt = gr.Textbox(label="Prompt", value="A futuristic poster, high detail")
+#         seed = gr.Number(label="Seed (0=random)", value=0)
+#     with gr.Row():
+#         control = gr.Image(type="pil", label="Control image (e.g., QR or edge map)")
+#         steps = gr.Slider(10, 50, 30, step=1, label="Steps")
+#         guidance = gr.Slider(1.0, 12.0, 7.5, step=0.1, label="Guidance scale")
+#     out = gr.Image(label="Result")
+
+#     btn = gr.Button("Generate")
+#     btn.click(generate, [prompt, control, guidance, steps, seed], out)
+
+#     # Enable simple API use
+#     gr.Examples([], inputs=[prompt, control, guidance, steps, seed], outputs=out)
+
+# demo.launch()
+
+
 import gradio as gr
 from PIL import Image
 
+def generate(prompt, control_image, guidance, steps, seed):
+    # dummy return so Space builds
+    return control_image
 
+demo = gr.Interface(
+    fn=generate,
+    inputs=[
+        gr.Textbox(label="Prompt"),
+        gr.Image(type="pil", label="Control Image"),
+        gr.Slider(1, 12, 7.5, label="Guidance scale"),
+        gr.Slider(10, 50, 30, step=1, label="Steps"),
+        gr.Number(0, label="Seed"),
+    ],
+    outputs=gr.Image(),
 )
 
+if __name__ == "__main__":
+    demo.launch()
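
Note on restoring the pipeline: the stub's "dummy return so Space builds" comment suggests the import-time model loading in the commented-out block is what broke the build. If the diffusers path comes back, one option is to defer the load to the first request so the app still starts on a CPU-only builder. This is a minimal sketch, not part of this commit, assuming torch and diffusers are reinstated in requirements.txt; it reuses BASE_MODEL, CONTROLNET_ID, and the pipeline calls from the commented-out code, and _get_pipe is a hypothetical helper:

import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

BASE_MODEL = "runwayml/stable-diffusion-v1-5"
CONTROLNET_ID = "lllyasviel/sd-controlnet-canny"

_pipe = None  # loaded on first call, so importing app.py stays cheap

def _get_pipe():
    global _pipe
    if _pipe is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        dtype = torch.float16 if device == "cuda" else torch.float32
        controlnet = ControlNetModel.from_pretrained(CONTROLNET_ID, torch_dtype=dtype)
        _pipe = StableDiffusionControlNetPipeline.from_pretrained(
            BASE_MODEL, controlnet=controlnet, torch_dtype=dtype, safety_checker=None
        ).to(device)
    return _pipe

def generate(prompt, control_image, guidance, steps, seed):
    # Same signature the gr.Interface above already wires up,
    # so only the function body changes when the models return.
    pipe = _get_pipe()
    generator = torch.Generator(device=pipe.device).manual_seed(int(seed)) if seed else None
    return pipe(
        prompt=prompt,
        image=control_image,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        generator=generator,
    ).images[0]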
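
The "# Enable simple API use" comment in the removed block still applies to the stub: gr.Interface exposes generate at the /predict endpoint, so the Space can be called programmatically once it is live. A sketch using a recent gradio_client (which provides handle_file for file inputs); the Space ID and the qr.png path are hypothetical placeholders:

from gradio_client import Client, handle_file

client = Client("owner/space-name")  # hypothetical Space ID; replace with the real one
result = client.predict(
    "A futuristic poster, high detail",  # Prompt
    handle_file("qr.png"),               # Control Image (hypothetical local file)
    7.5,                                 # Guidance scale
    30,                                  # Steps
    0,                                   # Seed
    api_name="/predict",
)
print(result)  # local filepath of the image returned by the Space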