Commit 5e1017c
Parent(s):
Duplicate from huggingface-projects/stable-diffusion-latent-upscaler
Co-authored-by: Patrick von Platen <[email protected]>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +263 -0
- nsfw.png +0 -0
- requirements.txt +9 -0
- style.css +36 -0
- utils.py +6 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Stable Diffusion Latent Upscaler
+emoji: ➕
+colorFrom: purple
+colorTo: red
+sdk: gradio
+sdk_version: 3.15.0
+app_file: app.py
+pinned: true
+license: mit
+duplicated_from: huggingface-projects/stable-diffusion-latent-upscaler
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,263 @@
+from diffusers import (
+    StableDiffusionPipeline,
+    DPMSolverMultistepScheduler,
+    DiffusionPipeline,
+)
+import gradio as gr
+import torch
+from PIL import Image
+import time
+import psutil
+import random
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+
+
+start_time = time.time()
+current_steps = 25
+
+SAFETY_CHECKER = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker", torch_dtype=torch.float16)
+
+UPSCALER = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
+UPSCALER.to("cuda")
+UPSCALER.enable_xformers_memory_efficient_attention()
+
+
+class Model:
+    def __init__(self, name, path=""):
+        self.name = name
+        self.path = path
+
+        if path != "":
+            self.pipe_t2i = StableDiffusionPipeline.from_pretrained(
+                path, torch_dtype=torch.float16, safety_checker=SAFETY_CHECKER
+            )
+            self.pipe_t2i.scheduler = DPMSolverMultistepScheduler.from_config(
+                self.pipe_t2i.scheduler.config
+            )
+        else:
+            self.pipe_t2i = None
+
+
+models = [
+    # Model("Stable Diffusion v1-4", "CompVis/stable-diffusion-v1-4"),
+    # Model("Stable Diffusion v1-5", "runwayml/stable-diffusion-v1-5"),
+    Model("anything-v4.0", "xyn-ai/anything-v4.0"),
+]
+
+MODELS = {m.name: m for m in models}
+
+device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
+
+
+def error_str(error, title="Error"):
+    return (
+        f"""#### {title}
+            {error}"""
+        if error
+        else ""
+    )
+
+
+def inference(
+    prompt,
+    neg_prompt,
+    guidance,
+    steps,
+    seed,
+    model_name,
+):
+
+    print(psutil.virtual_memory())  # print memory usage
+
+    if seed == 0:
+        seed = random.randint(0, 2147483647)
+
+    generator = torch.Generator("cuda").manual_seed(seed)
+
+    try:
+        low_res_image, up_res_image = txt_to_img(
+            model_name,
+            prompt,
+            neg_prompt,
+            guidance,
+            steps,
+            generator,
+        )
+        return low_res_image, up_res_image, f"Done. Seed: {seed}"
+    except Exception as e:
+        return None, None, error_str(e)
+
+
+def txt_to_img(
+    model_name,
+    prompt,
+    neg_prompt,
+    guidance,
+    steps,
+    generator,
+):
+    pipe = MODELS[model_name].pipe_t2i
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
+        pipe.enable_xformers_memory_efficient_attention()
+
+    low_res_latents = pipe(
+        prompt,
+        negative_prompt=neg_prompt,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        generator=generator,
+        output_type="latent",
+    ).images
+
+    with torch.no_grad():
+        low_res_image = pipe.decode_latents(low_res_latents)
+        low_res_image = pipe.numpy_to_pil(low_res_image)
+
+    up_res_image = UPSCALER(
+        prompt=prompt,
+        negative_prompt=neg_prompt,
+        image=low_res_latents,
+        num_inference_steps=20,
+        guidance_scale=0,
+        generator=generator,
+    ).images
+
+    pipe.to("cpu")
+    torch.cuda.empty_cache()
+
+    return low_res_image[0], up_res_image[0]
+
+
+def replace_nsfw_images(results):
+    for i in range(len(results.images)):
+        if results.nsfw_content_detected[i]:
+            results.images[i] = Image.open("nsfw.png")
+    return results.images
+
+
+with gr.Blocks(css="style.css") as demo:
+    gr.HTML(
+        f"""
+            <div class="finetuned-diffusion-div">
+              <div style="text-align: center">
+                <h1>Anything v4 model + <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Stable Diffusion Latent Upscaler</a></h1>
+                <p>
+                  Demo for the <a href="https://huggingface.co/andite/anything-v4.0">Anything v4</a> model hooked with the ultra-fast <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Latent Upscaler</a>
+                </p>
+              </div>
+              <!--
+              <p>To skip the queue, you can duplicate this Space<br>
+              <a style="display:inline-block" href="https://huggingface.co/spaces/patrickvonplaten/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
+              -->
+            </div>
+        """
+    )
+
+    with gr.Column(scale=100):
+        with gr.Group(visible=False):
+            model_name = gr.Dropdown(
+                label="Model",
+                choices=[m.name for m in models],
+                value=models[0].name,
+                visible=False
+            )
+
+        with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
+            with gr.Column():
+                prompt = gr.Textbox(
+                    label="Enter your prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    elem_id="prompt-text-input",
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+                neg_prompt = gr.Textbox(
+                    label="Enter your negative prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter a negative prompt",
+                    elem_id="negative-prompt-text-input",
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+            generate = gr.Button("Generate image").style(
+                margin=False,
+                rounded=(False, True, True, False),
+                full_width=False,
+            )
+
+        with gr.Accordion("Advanced Options", open=False):
+            with gr.Group():
+                with gr.Row():
+                    guidance = gr.Slider(
+                        label="Guidance scale", value=7.5, maximum=15
+                    )
+                    steps = gr.Slider(
+                        label="Steps",
+                        value=current_steps,
+                        minimum=2,
+                        maximum=75,
+                        step=1,
+                    )
+
+                seed = gr.Slider(
+                    0, 2147483647, label="Seed (0 = random)", value=0, step=1
+                )
+
+
+    with gr.Column(scale=100):
+        with gr.Row():
+            with gr.Column(scale=75):
+                up_res_image = gr.Image(label="Upscaled 1024px Image", shape=(1024, 1024))
+            with gr.Column(scale=25):
+                low_res_image = gr.Image(label="Original 512px Image", shape=(512, 512))
+        error_output = gr.Markdown()
+
+    inputs = [
+        prompt,
+        neg_prompt,
+        guidance,
+        steps,
+        seed,
+        model_name,
+    ]
+    outputs = [low_res_image, up_res_image, error_output]
+    prompt.submit(inference, inputs=inputs, outputs=outputs)
+    generate.click(inference, inputs=inputs, outputs=outputs)
+
+    ex = gr.Examples(
+        [
+            ["a mecha robot in a favela", "low quality", 7.5, 25, 33, models[0].name],
+            ["the spirit of a tamagotchi wandering in the city of Paris", "low quality, bad render", 7.5, 50, 85, models[0].name],
+        ],
+        inputs=[prompt, neg_prompt, guidance, steps, seed, model_name],
+        outputs=outputs,
+        fn=inference,
+        cache_examples=True,
+    )
+    ex.dataset.headers = [""]
+
+    gr.HTML(
+        """
+        <div style="border-top: 1px solid #303030;">
+          <br>
+          <p>Space by 🤗 Hugging Face, models by Stability AI, andite, linaqruf and others ❤️</p>
+          <p>This space uses the <a href="https://github.com/LuChengTHU/dpm-solver">DPM-Solver++</a> sampler by <a href="https://arxiv.org/abs/2206.00927">Cheng Lu, et al.</a>.</p>
+          <p>This is a Demo Space For:<br>
+          <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Stability AI's Latent Upscaler</a>
+        </div>
+        """
+    )
+
+    print(f"Space built in {time.time() - start_time:.2f} seconds")
+
+demo.queue(concurrency_count=1)
+demo.launch()
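For context on what app.py wires together: the base pipeline is asked for latents (output_type="latent") rather than decoded pixels, and those latents are fed straight to the x2 latent upscaler, which decodes the 1024px result. Below is a minimal standalone sketch of that two-stage flow, reusing the model IDs and diffusers calls from app.py above; it is illustrative only and not part of the commit.

    import torch
    from diffusers import StableDiffusionPipeline, DiffusionPipeline

    # Stage-1 pipeline (text -> latents) and stage-2 latent upscaler,
    # same checkpoints as in app.py.
    base = StableDiffusionPipeline.from_pretrained(
        "xyn-ai/anything-v4.0", torch_dtype=torch.float16
    ).to("cuda")
    upscaler = DiffusionPipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
    ).to("cuda")

    prompt = "a mecha robot in a favela"
    generator = torch.Generator("cuda").manual_seed(33)

    # Stage 1: stop at the latent tensor instead of decoding to pixels.
    low_res_latents = base(
        prompt, num_inference_steps=25, generator=generator, output_type="latent"
    ).images

    # Stage 2: the upscaler consumes the latents directly and returns a 1024px image.
    image = upscaler(
        prompt=prompt,
        image=low_res_latents,
        num_inference_steps=20,
        guidance_scale=0,
        generator=generator,
    ).images[0]
    image.save("upscaled.png")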
nsfw.png
ADDED
(binary image file)
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+--extra-index-url https://download.pytorch.org/whl/cu113
+torch
+git+https://github.com/huggingface/diffusers.git
+git+https://github.com/huggingface/transformers
+scipy
+ftfy
+psutil
+accelerate
+xformers
style.css
ADDED
@@ -0,0 +1,36 @@
+.container {
+    max-width: 960px
+}
+
+.finetuned-diffusion-div div {
+    align-items: center;
+    gap: .8rem;
+    font-size: 1.75rem
+}
+
+.finetuned-diffusion-div div h1 {
+    font-weight: 900;
+    margin-bottom: 7px
+}
+
+.finetuned-diffusion-div div p {
+    font-size: 50%
+}
+
+.finetuned-diffusion-div p {
+    margin-bottom: 10px;
+    font-size: 94%
+}
+
+a {
+    text-decoration: underline
+}
+
+.tabs {
+    margin-top: 0;
+    margin-bottom: 0
+}
+
+#gallery {
+    min-height: 20rem
+}
utils.py
ADDED
@@ -0,0 +1,6 @@
+def is_google_colab():
+    try:
+        import google.colab
+        return True
+    except:
+        return False
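utils.py's single helper is not imported anywhere else in this commit. If it were wired in, a typical (hypothetical) use would be toggling Gradio's public share link, since Colab cannot expose localhost directly; a sketch:

    import gradio as gr
    from utils import is_google_colab

    with gr.Blocks() as demo:
        gr.Markdown("hello")

    # Hypothetical: request a public share link only when running in Colab.
    demo.launch(share=is_google_colab())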