Spaces:
Running
on
Zero
Running
on
Zero
Tanut
committed on
Commit
·
d0e494f
1
Parent(s):
b83d18f
Test ZeroGPU
Browse files
app.py
CHANGED
@@ -3,7 +3,21 @@ import gradio as gr
|
|
3 |
import torch, spaces
|
4 |
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
|
5 |
|
6 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
zero = torch.tensor([0.0])
|
8 |
print("startup device:", zero.device) # should print: cpu
|
9 |
|
@@ -38,15 +52,14 @@ def snap8(x: int) -> int:
|
|
38 |
x = max(256, min(1024, int(x)))
|
39 |
return x - (x % 8)
|
40 |
|
41 |
-
#
|
42 |
-
@spaces.GPU
|
43 |
def greet(n: float):
|
44 |
print("inside greet, cuda available:", torch.cuda.is_available())
|
45 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
46 |
t = zero.to(device) + torch.tensor([float(n)], device=device)
|
47 |
return f"Hello {t.item():.3f} Tensor (device: {t.device})"
|
48 |
|
49 |
-
#
|
50 |
@spaces.GPU(duration=120)
|
51 |
def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, height: int, seed: int):
|
52 |
pipe = get_pipe()
|
@@ -72,7 +85,7 @@ def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, hei
|
|
72 |
)
|
73 |
return out.images[0]
|
74 |
|
75 |
-
# ====== UI
|
76 |
with gr.Blocks() as demo:
|
77 |
gr.Markdown("# ZeroGPU demo + Stable Diffusion 1.5 (minimal)")
|
78 |
|
@@ -95,5 +108,10 @@ with gr.Blocks() as demo:
|
|
95 |
)
|
96 |
|
97 |
if __name__ == "__main__":
|
98 |
-
#
|
99 |
-
demo.launch(
|
|
|
|
|
|
|
|
|
|
|
|
3 |
import torch, spaces
|
4 |
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
|
5 |
|
6 |
# --- gradio_client bool-schema hotfix (safe no-op if not needed) ---
# Older gradio_client versions crash on JSON Schema entries that are plain
# booleans (e.g. `additionalProperties: true`); wrap get_type to tolerate them.
try:
    import gradio_client.utils as _gcu

    _orig_get_type = _gcu.get_type

    def _get_type_safe(schema):
        # A bare bool is a valid JSON Schema ("match anything"/"match nothing");
        # report it as "any" instead of letting the original helper raise.
        return "any" if isinstance(schema, bool) else _orig_get_type(schema)

    _gcu.get_type = _get_type_safe
except Exception:
    # Best-effort patch: if gradio_client is absent or its API changed,
    # leave everything untouched.
    pass
# -------------------------------------------------------------------
|
19 |
+
|
20 |
+
# Sanity check at import time: before any @spaces.GPU call runs, the process
# has no GPU, so this tensor must live on the CPU.
zero = torch.zeros(1)
print("startup device:", zero.device)  # should print: cpu
|
23 |
|
|
|
52 |
def snap8(x: int) -> int:
    """Clamp *x* into [256, 1024] and round down to the nearest multiple of 8."""
    clamped = min(1024, max(256, int(x)))
    return clamped - clamped % 8
|
54 |
|
55 |
+
@spaces.GPU  # tiny demo proving CUDA is present inside GPU block
def greet(n: float):
    """Add *n* to the module-level `zero` tensor inside a ZeroGPU block.

    Returns a short status string that includes the computed value and the
    device the tensor ended up on (cuda when ZeroGPU attached a GPU).
    """
    print("inside greet, cuda available:", torch.cuda.is_available())
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    result = zero.to(dev) + torch.tensor([float(n)], device=dev)
    return f"Hello {result.item():.3f} Tensor (device: {result.device})"
|
61 |
|
62 |
+
# SD 1.5 text -> image
|
63 |
@spaces.GPU(duration=120)
|
64 |
def generate(prompt: str, negative: str, steps: int, cfg: float, width: int, height: int, seed: int):
|
65 |
pipe = get_pipe()
|
|
|
85 |
)
|
86 |
return out.images[0]
|
87 |
|
88 |
+
# ====== UI ======
|
89 |
with gr.Blocks() as demo:
|
90 |
gr.Markdown("# ZeroGPU demo + Stable Diffusion 1.5 (minimal)")
|
91 |
|
|
|
108 |
)
|
109 |
|
110 |
if __name__ == "__main__":
    # On Spaces, expose a shareable URL & bind to 0.0.0.0
    app = demo.queue(max_size=8)
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        show_api=True,
    )
|