Update app.py
app.py CHANGED
@@ -42,7 +42,9 @@ def sanitize_filename(name):
     return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:80]
 
 @spaces.GPU(duration=75)
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+    login(token=user_token)  # Authenticate for this call only
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
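Note on the hunk above: `login` is presumably `huggingface_hub.login`; the corresponding import is not visible in this diff and is assumed to exist elsewhere in app.py. A minimal sketch of the per-call authentication pattern the change implies (every name other than `login` is illustrative):

# Sketch only: authenticate with the caller's own token on each request instead of a
# Space-wide secret. Assumes `huggingface_hub` is installed; names below are illustrative.
from huggingface_hub import login

def run_with_user_token(user_token: str, prompt: str) -> str:
    login(token=user_token)  # authenticate this process for the current call
    # ...load or call the gated model here, as infer() does with its pipeline...
    return f"authenticated, would now generate: {prompt}"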
@@ -63,7 +65,6 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
     img_path = f"image_preview/{safe_name}_{seed}.jpg"
     img.convert("RGB").save(img_path, "JPEG", quality=60)
 
-    # Return image and updated list of previews
     previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
     return img, seed, previews
 
@@ -86,6 +87,12 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
     """)
 
     with gr.Row():
+        hf_token_input = gr.Textbox(
+            label="Your Hugging Face API Token",
+            placeholder="Paste your token here",
+            type="password",
+        )
+
         prompt = gr.Text(
             label="Prompt",
             show_label=False,
@@ -109,14 +116,14 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
 
         result_example = gr.Image(visible=False)
 
-
-
-
-
-
-
-
-
+        gr.Examples(
+            examples=examples,
+            fn=infer,
+            inputs=[hf_token_input, prompt],
+            outputs=[result_example, seed, gr.Gallery(visible=False)],
+            cache_examples=False,  # Don't cache examples with real tokens!
+        )
+
 
         with gr.Column(scale=1, elem_id="right-column"):
             result = gr.Image(label="", show_label=False, elem_id="generated-image")
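Because the `gr.Examples` block above lists `inputs=[hf_token_input, prompt]`, each entry in `examples` has to supply a value for both components, with the remaining `infer` parameters falling back to their defaults; `cache_examples=False` keeps the examples from being executed and cached with whatever token happens to be in the field. The `examples` list itself is defined elsewhere in app.py; a hypothetical shape, assuming an empty-string token placeholder and made-up prompts:

# Hypothetical shape of `examples` for inputs=[hf_token_input, prompt]:
# one [token, prompt] pair per row. The real list lives elsewhere in app.py.
examples = [
    ["", "a surreal portrait with floating geometric shapes"],
    ["", "a dreamlike pastel landscape with impossible architecture"],
]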
@@ -129,9 +136,10 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
-        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        inputs=[hf_token_input, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs=[result, seed, gallery],
     )
 
+
 if __name__ == "__main__":
     natalie_diffusion.launch()
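The `gr.on` wiring above passes components to `infer` positionally, so `hf_token_input` must stay first in `inputs` to land in the new `user_token` parameter. A self-contained sketch of that mapping with placeholder components and a trivial handler:

# Standalone sketch of positional input mapping in gr.on; all names are placeholders.
import gradio as gr

def handler(token: str, text: str) -> str:
    # `token` receives the first listed component, `text` the second.
    return f"received a {len(token)}-character token and prompt: {text}"

with gr.Blocks() as demo:
    token_in = gr.Textbox(label="Token", type="password")
    prompt_in = gr.Textbox(label="Prompt")
    run_btn = gr.Button("Run")
    out = gr.Textbox(label="Output")
    gr.on(
        triggers=[run_btn.click, prompt_in.submit],
        fn=handler,
        inputs=[token_in, prompt_in],
        outputs=[out],
    )

if __name__ == "__main__":
    demo.launch()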