Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -4,6 +4,8 @@ import random
 import spaces
 import os
 import torch
+import re
+from PIL import Image
 
 from diffusers import DiffusionPipeline, AutoencoderTiny
 from huggingface_hub import login
@@ -30,19 +32,25 @@ pipe = DiffusionPipeline.from_pretrained(
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 pipe.load_lora_weights("ZennyKenny/flux_lora_natalie-diffusion")
 
+# Ensure image_preview dir exists
+os.makedirs("image_preview", exist_ok=True)
+
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
+def sanitize_filename(name):
+    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:80]
+
 @spaces.GPU(duration=75)
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
 
-
+    full_prompt = f"XTON {prompt}"
 
     for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-        prompt=
+        prompt=full_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
@@ -50,7 +58,14 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
         generator=generator,
         output_type="pil",
     ):
-
+        # Save image to /image_preview
+        safe_name = sanitize_filename(prompt)
+        img_path = f"image_preview/{safe_name}_{seed}.jpg"
+        img.convert("RGB").save(img_path, "JPEG", quality=60)
+
+    # Return image and updated list of previews
+    previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
+    return img, seed, previews
 
 examples = [
     "a man walking in the forest",
@@ -62,14 +77,12 @@ css = """
 #left-column {
     padding: 1rem;
 }
-
 #right-column {
     display: flex;
     align-items: center;
     justify-content: center;
     padding: 1rem;
 }
-
 #generated-image > img {
     border-radius: 12px;
     box-shadow: 0px 0px 12px rgba(0, 0, 0, 0.2);
@@ -111,14 +124,13 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
             guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.1, value=3.5)
             num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
 
-            # Use hidden result for examples only
             result_example = gr.Image(visible=False)
 
             gr.Examples(
                 examples=examples,
                 fn=infer,
                 inputs=[prompt],
-                outputs=[result_example, seed],
+                outputs=[result_example, seed, gr.Gallery(visible=False)],
                 cache_examples=True,
                 cache_mode="lazy"
             )
@@ -126,11 +138,16 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
         with gr.Column(scale=1, elem_id="right-column"):
             result = gr.Image(label="", show_label=False, elem_id="generated-image")
 
+            gr_state = gr.State([])  # internal list of previews
+            with gr.Column():
+                gr.Markdown("<h3 style='text-align:center;'>Generated Images Preview</h3>")
+                gallery = gr.Gallery(label="", columns=4, height="auto", object_fit="cover")
+
             gr.on(
                 triggers=[run_button.click, prompt.submit],
                 fn=infer,
                 inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-                outputs=[result, seed],
+                outputs=[result, seed, gallery],
             )
 
 if __name__ == "__main__":
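For reference, a minimal standalone sketch of the preview flow the updated infer() introduces: sanitize the prompt, save a reduced-quality JPEG under image_preview/, then collect the saved files for the gallery. This is not part of the commit, and the Image.new placeholder merely stands in for a generated frame.

import os
import re

from PIL import Image

def sanitize_filename(name):
    # Keep only [a-zA-Z0-9_-] and cap the length, mirroring the helper added above.
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:80]

os.makedirs("image_preview", exist_ok=True)

prompt, seed = "a man walking in the forest", 42
img = Image.new("RGB", (64, 64))  # placeholder standing in for a generated image
img_path = f"image_preview/{sanitize_filename(prompt)}_{seed}.jpg"
img.convert("RGB").save(img_path, "JPEG", quality=60)

# The same listing feeds the gr.Gallery output in the app.
previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
print(previews)  # e.g. ['image_preview/a_man_walking_in_the_forest_42.jpg']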