Commit be60e58 · 0 parent(s)

Duplicate from AlexWortega/Kandinsky2.1

Co-authored-by: Wortega <[email protected]>
- .gitattributes +34 -0
- NatallE.png +0 -0
- README.md +13 -0
- app.py +221 -0
- kandi2.png +0 -0
- packages.txt +3 -0
- requirements.txt +4 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
NatallE.png
ADDED
(binary image file)
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Kandinsky2.0
+emoji: π
+colorFrom: indigo
+colorTo: green
+sdk: gradio
+sdk_version: 3.11.0
+app_file: app.py
+pinned: false
+duplicated_from: AlexWortega/Kandinsky2.1
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,221 @@
+
+import gradio as gr
+import torch
+from torch import autocast
+from kandinsky2 import get_kandinsky2
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+
+# Load the Kandinsky 2.1 text-to-image pipeline (the Space expects a CUDA device).
+model = get_kandinsky2('cuda', task_type='text2img', model_version='2.1', use_flash_attention=False)
+
+
+
+def infer(prompt):
+    images = model.generate_text2img(prompt, num_steps=100,
+                                     batch_size=1, guidance_scale=4,
+                                     h=768, w=768,
+                                     sampler='p_sampler', prior_cf_scale=4,
+                                     prior_steps="5",)
+    return images
+
+css = """
+        .gradio-container {
+            font-family: 'IBM Plex Sans', sans-serif;
+        }
+        .gr-button {
+            color: white;
+            border-color: black;
+            background: black;
+        }
+        input[type='range'] {
+            accent-color: black;
+        }
+        .dark input[type='range'] {
+            accent-color: #dfdfdf;
+        }
+        .container {
+            max-width: 730px;
+            margin: auto;
+            padding-top: 1.5rem;
+        }
+        #gallery {
+            min-height: 22rem;
+            margin-bottom: 15px;
+            margin-left: auto;
+            margin-right: auto;
+            border-bottom-right-radius: .5rem !important;
+            border-bottom-left-radius: .5rem !important;
+        }
+        #gallery>div>.h-full {
+            min-height: 20rem;
+        }
+        .details:hover {
+            text-decoration: underline;
+        }
+        .gr-button {
+            white-space: nowrap;
+        }
+        .gr-button:focus {
+            border-color: rgb(147 197 253 / var(--tw-border-opacity));
+            outline: none;
+            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+            --tw-border-opacity: 1;
+            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
+            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+            --tw-ring-opacity: .5;
+        }
+        #advanced-btn {
+            font-size: .7rem !important;
+            line-height: 19px;
+            margin-top: 12px;
+            margin-bottom: 12px;
+            padding: 2px 8px;
+            border-radius: 14px !important;
+        }
+        #advanced-options {
+            display: none;
+            margin-bottom: 20px;
+        }
+        .footer {
+            margin-bottom: 45px;
+            margin-top: 35px;
+            text-align: center;
+            border-bottom: 1px solid #e5e5e5;
+        }
+        .footer>p {
+            font-size: .8rem;
+            display: inline-block;
+            padding: 0 10px;
+            transform: translateY(10px);
+            background: white;
+        }
+        .dark .footer {
+            border-color: #303030;
+        }
+        .dark .footer>p {
+            background: #0b0f19;
+        }
+        .acknowledgments h4{
+            margin: 1.25em 0 .25em 0;
+            font-weight: bold;
+            font-size: 115%;
+        }
+        #container-advanced-btns{
+            display: flex;
+            flex-wrap: wrap;
+            justify-content: space-between;
+            align-items: center;
+        }
+        .animate-spin {
+            animation: spin 1s linear infinite;
+        }
+        @keyframes spin {
+            from {
+                transform: rotate(0deg);
+            }
+            to {
+                transform: rotate(360deg);
+            }
+        }
+        #share-btn-container {
+            display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+        }
+        #share-btn {
+            all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+        }
+        #share-btn * {
+            all: unset;
+        }
+        .gr-form{
+            flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
+        }
+        #prompt-container{
+            gap: 0;
+        }
+        #generated_id{
+            min-height: 700px
+        }
+"""
+block = gr.Blocks(css=css)
+
+examples = [
+
+    [
+        'Thinking man in anime style'
+    ],
+
+]
+
+with block as demo:
+    gr.Markdown("""
+
+
+[PyTorch](https://pytorch.org/) [Hugging Face model](https://huggingface.co/sberbank-ai/Kandinsky_2.0)
+[Official blog post](https://habr.com/ru/company/sberbank/blog/725282/)
+
+## Model architecture:
+Kandinsky 2.1 inherits best practices from DALL-E 2 and latent diffusion, while introducing some new ideas.
+
+As text and image encoder it uses the CLIP model, together with a diffusion image prior (a mapping between the latent spaces of the CLIP modalities). This approach improves the model's visual quality and opens new possibilities for blending images and for text-guided image manipulation.
+
+For the diffusion mapping between latent spaces we use a transformer with num_layers=20, num_heads=32 and hidden_size=2048.
+
+Other architecture parts:
+
+- Text encoder (XLM-Roberta-Large-Vit-L-14) - 560M
+- Diffusion Image Prior - 1B
+- CLIP image encoder (ViT-L/14) - 427M
+- Latent Diffusion U-Net - 1.22B
+- MoVQ encoder/decoder - 67M
+
+Kandinsky 2.1 was trained on LAION HighRes, a large-scale image-text dataset, and fine-tuned on our internal datasets.
+
+**Kandinsky 2.0** architecture overview:
+
+
+    """
+    )
+    with gr.Group():
+        with gr.Box():
+            with gr.Row().style(mobile_collapse=False, equal_height=True):
+
+                text = gr.Textbox(
+                    label="Enter your prompt", show_label=False, max_lines=1
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+                btn = gr.Button("Run").style(
+                    margin=False,
+                    rounded=(False, True, True, False),
+                )
+
+        gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="generated_id").style(
+            grid=[2], height="auto"
+        )
+
+        ex = gr.Examples(examples=examples, fn=infer, inputs=[text], outputs=gallery, cache_examples=True)
+        ex.dataset.headers = [""]
+
+    text.submit(infer, inputs=[text], outputs=gallery)
+    btn.click(infer, inputs=[text], outputs=gallery)
+    gr.Markdown("""
+
+
+# Authors
+
++ Arseniy Shakhmatov: [Github](https://github.com/cene555), [Blog](https://t.me/gradientdip)
++ Anton Razzhigaev: [Github](https://github.com/razzant), [Blog](https://t.me/abstractDL)
++ Aleksandr Nikolich: [Github](https://github.com/AlexWortega), [Blog](https://t.me/lovedeathtransformers)
++ Vladimir Arkhipkin: [Github](https://github.com/oriBetelgeuse)
++ Igor Pavlov: [Github](https://github.com/boomb0om)
++ Andrey Kuznetsov: [Github](https://github.com/kuznetsoffandrey)
++ Denis Dimitrov: [Github](https://github.com/denndimitrov)
+
+    """
+    )
+
+demo.queue(max_size=2).launch()
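For reference, the generation call that app.py wires into the Gradio UI can also be run standalone. Below is a minimal sketch, assuming the kandinsky2 package from requirements.txt is installed and a CUDA GPU is available; the prompt and output filename are illustrative, not part of the Space.

```python
# Minimal standalone sketch of the text2img call used by app.py.
# Assumes kandinsky2 (git+https://github.com/ai-forever/Kandinsky-2.git)
# is installed and a CUDA device is available.
from kandinsky2 import get_kandinsky2

model = get_kandinsky2('cuda', task_type='text2img', model_version='2.1',
                       use_flash_attention=False)

images = model.generate_text2img(
    'Thinking man in anime style',  # example prompt from the Space
    num_steps=100,        # denoising steps
    batch_size=1,         # one image per call
    guidance_scale=4,     # classifier-free guidance strength
    h=768, w=768,         # output resolution
    sampler='p_sampler',
    prior_cf_scale=4,
    prior_steps='5',      # the API takes the prior step count as a string
)

# generate_text2img returns a list of PIL images.
images[0].save('output.png')
```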
kandi2.png
ADDED
(binary image file)
packages.txt
ADDED
@@ -0,0 +1,3 @@
+ffmpeg
+libsm6
+libxext6
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+git+https://github.com/ai-forever/Kandinsky-2.git
+gradio
+opencv-python
+git+https://github.com/openai/CLIP.git