Spaces:
Runtime error
Duplicate from fffiloni/whisper-to-stable-diffusion
Co-authored-by: Sylvain Filoni <[email protected]>
- .gitattributes +31 -0
- README.md +13 -0
- app.py +468 -0
- requirements.txt +7 -0
- unsafe.png +0 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Whisper To Stable Diffusion
+emoji: ๐๐ผ๏ธ
+colorFrom: red
+colorTo: pink
+sdk: gradio
+sdk_version: 3.9.1
+app_file: app.py
+pinned: false
+duplicated_from: fffiloni/whisper-to-stable-diffusion
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,468 @@
+import gradio as gr
+#import torch
+import whisper
+from datetime import datetime
+from PIL import Image
+import flag
+import os
+#MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
+
+#from diffusers import StableDiffusionPipeline
+
+stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
+### ────────────────────────────────────────
+
+title = "Whisper to Stable Diffusion"
+
+### ────────────────────────────────────────
+
+whisper_model = whisper.load_model("small")
+
+#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+#pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=MY_SECRET_TOKEN)
+#pipe.to(device)
+
+### ────────────────────────────────────────
+
+def get_images(prompt):
+    gallery_dir = stable_diffusion(prompt, fn_index=2)
+    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
+
+
+def magic_whisper_to_sd(audio, guidance_scale, nb_iterations, seed):
+
+    whisper_results = translate(audio)
+    prompt = whisper_results[2]
+    images = get_images(prompt)
+
+    return whisper_results[0], whisper_results[1], whisper_results[2], images
+
+#def diffuse(prompt, guidance_scale, nb_iterations, seed):
+#
+#    generator = torch.Generator(device=device).manual_seed(int(seed))
+#
+#    print("""
+#    ─
+#    Sending prompt to Stable Diffusion ...
+#    ─
+#    """)
+#    print("prompt: " + prompt)
+#    print("guidance scale: " + str(guidance_scale))
+#    print("inference steps: " + str(nb_iterations))
+#    print("seed: " + str(seed))
+#
+#    images_list = pipe(
+#        [prompt] * 2,
+#        guidance_scale=guidance_scale,
+#        num_inference_steps=nb_iterations,
+#        generator=generator
+#    )
+#
+#    images = []
+#
+#    safe_image = Image.open(r"unsafe.png")
+#
+#    for i, image in enumerate(images_list["sample"]):
+#        if(images_list["nsfw_content_detected"][i]):
+#            images.append(safe_image)
+#        else:
+#            images.append(image)
+#
+#
+#    print("Stable Diffusion has finished")
+#    print("───────────────────────────────────────────")
+#
+#    return images
+
+def translate(audio):
+    print("""
+    ─
+    Sending audio to Whisper ...
+    ─
+    """)
+    # current dateTime
+    now = datetime.now()
+    # convert to string
+    date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+    print('DateTime String:', date_time_str)
+
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)
+
+    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
+
+    _, probs = whisper_model.detect_language(mel)
+
+    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
+    translate_options = whisper.DecodingOptions(task="translate", fp16=False)
+
+    transcription = whisper.decode(whisper_model, mel, transcript_options)
+    translation = whisper.decode(whisper_model, mel, translate_options)
+
+    print("language spoken: " + transcription.language)
+    print("transcript: " + transcription.text)
+    print("───────────────────────────────────────────")
+    print("translated: " + translation.text)
+    if transcription.language == "en":
+        tr_flag = flag.flag('GB')
+    else:
+        tr_flag = flag.flag(transcription.language)
+    return tr_flag, transcription.text, translation.text
+
+### ────────────────────────────────────────
+
+css = """
+.container {
+  max-width: 880px;
+  margin: auto;
+  padding-top: 1.5rem;
+}
+a {
+  text-decoration: underline;
+}
+h1 {
+  font-weight: 900;
+  margin-bottom: 7px;
+  text-align: center;
+  font-size: 2em;
+  margin-bottom: 1em;
+}
+#w2sd_container{
+  margin-top: 20px;
+}
+.footer {
+  margin-bottom: 45px;
+  margin-top: 35px;
+  text-align: center;
+  border-bottom: 1px solid #e5e5e5;
+}
+.footer>p {
+  font-size: .8rem;
+  display: inline-block;
+  padding: 0 10px;
+  transform: translateY(10px);
+  background: white;
+}
+.dark .footer {
+  border-color: #303030;
+}
+.dark .footer>p {
+  background: #0b0f19;
+}
+.tabitem {
+  border-bottom-left-radius: 10px;
+  border-bottom-right-radius: 10px;
+}
+#record_tab, #upload_tab {
+  font-size: 1.2em;
+}
+#record_btn{
+
+}
+#record_btn > div > button > span {
+  width: 2.375rem;
+  height: 2.375rem;
+}
+#record_btn > div > button > span > span {
+  width: 2.375rem;
+  height: 2.375rem;
+}
+audio {
+  margin-bottom: 10px;
+}
+div#record_btn > .mt-6{
+  margin-top: 0!important;
+}
+div#record_btn > .mt-6 button {
+  font-size: 2em;
+  width: 100%;
+  padding: 20px;
+  height: 160px;
+}
+div#upload_area {
+  height: 11.1rem;
+}
+div#upload_area > div.w-full > div {
+  min-height: 9rem;
+}
+#check_btn_1, #check_btn_2{
+  color: #fff;
+  --tw-gradient-from: #4caf50;
+  --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
+  --tw-gradient-to: #4caf50;
+  border-color: #8bc34a;
+}
+#magic_btn_1, #magic_btn_2{
+  color: #fff;
+  --tw-gradient-from: #f44336;
+  --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
+  --tw-gradient-to: #ff9800;
+  border-color: #ff9800;
+}
+input::-webkit-inner-spin-button, input::-webkit-outer-spin-button {
+  -webkit-appearance: none;
+}
+input[type=number] {
+  -moz-appearance: textfield;
+}
+input[type=range] {
+  -webkit-appearance: none;
+  cursor: pointer;
+  height: 1px;
+  background: currentColor;
+}
+input[type=range]::-webkit-slider-thumb {
+  -webkit-appearance: none;
+  width: 0.5em;
+  height: 1.2em;
+  border-radius: 10px;
+  background: currentColor;
+}
+input[type=range]::-moz-range-thumb{
+  width: 0.5em;
+  height: 1.2em;
+  border-radius: 10px;
+  background: currentColor;
+}
+div#spoken_lang textarea {
+  font-size: 4em;
+  line-height: 1em;
+  text-align: center;
+}
+div#transcripted {
+  flex: 4;
+}
+div#translated textarea {
+  font-size: 1.5em;
+  line-height: 1.25em;
+}
+#sd_settings {
+  margin-bottom: 20px;
+}
+#diffuse_btn {
+  color: #fff;
+  font-size: 1em;
+  margin-bottom: 20px;
+  --tw-gradient-from: #4caf50;
+  --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
+  --tw-gradient-to: #4caf50;
+  border-color: #8bc34a;
+}
+#notice {
+  padding: 20px 14px 10px;
+  display: flex;
+  align-content: space-evenly;
+  gap: 20px;
+  line-height: 1em;
+  font-size: .8em;
+  border: 1px solid #374151;
+  border-radius: 10px;
+}
+#about {
+  padding: 20px;
+}
+#notice > div {
+  flex: 1;
+}
+
+"""
+
+### ────────────────────────────────────────
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column():
+        gr.HTML('''
+            <h1>
+            Whisper to Stable Diffusion
+            </h1>
+            <p style='text-align: center;'>
+            Ask Stable Diffusion for images by speaking (or singing ๐ค) in your native language! Try it in French ๐
+            </p>
+
+            <p style='text-align: center;'>
+            This demo is wired to the official SD Space • Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
+            ─
+            </p>
+
+        ''')
+        # with gr.Row(elem_id="w2sd_container"):
+        #     with gr.Column():
+
+        gr.Markdown(
+            """
+
+            ## 1. Record audio or Upload an audio file:
+            """
+        )
+
+        with gr.Tab(label="Record audio input", elem_id="record_tab"):
+            with gr.Column():
+                record_input = gr.Audio(
+                    source="microphone",
+                    type="filepath",
+                    show_label=False,
+                    elem_id="record_btn"
+                )
+                with gr.Row():
+                    audio_r_translate = gr.Button("Check Whisper first ? ๐", elem_id="check_btn_1")
+                    audio_r_direct_sd = gr.Button("Magic Whisper ► SD right now!", elem_id="magic_btn_1")
+
+        with gr.Tab(label="Upload audio input", elem_id="upload_tab"):
+            with gr.Column():
+                upload_input = gr.Audio(
+                    source="upload",
+                    type="filepath",
+                    show_label=False,
+                    elem_id="upload_area"
+                )
+                with gr.Row():
+                    audio_u_translate = gr.Button("Check Whisper first ? ๐", elem_id="check_btn_2")
+                    audio_u_direct_sd = gr.Button("Magic Whisper ► SD right now!", elem_id="magic_btn_2")
+
+        with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False):
+            with gr.Row():
+                guidance_scale = gr.Slider(2, 15, value=7, label='Guidance Scale')
+                nb_iterations = gr.Slider(10, 50, value=25, step=1, label='Steps')
+                seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+
+        gr.Markdown(
+            """
+            ## 2. Check Whisper output, correct it if necessary:
+            """
+        )
+
+        with gr.Row():
+
+            transcripted_output = gr.Textbox(
+                label="Transcription in your detected spoken language",
+                lines=3,
+                elem_id="transcripted"
+            )
+            language_detected_output = gr.Textbox(label="Native language", elem_id="spoken_lang", lines=3)
+
+        with gr.Column():
+            translated_output = gr.Textbox(
+                label="Transcript translated in English by Whisper",
+                lines=4,
+                elem_id="translated"
+            )
+            with gr.Row():
+                clear_btn = gr.Button(value="Clear")
+                diffuse_btn = gr.Button(value="OK, Diffuse this prompt !", elem_id="diffuse_btn")
+
+            clear_btn.click(fn=lambda value: gr.update(value=""), inputs=clear_btn, outputs=translated_output)
+
+        # with gr.Column():
+
+        gr.Markdown("""
+            ## 3. Wait for Stable Diffusion Results โ๏ธ
+            Inference time is about ~20-30 seconds, when it's your turn ๐ฌ
+            """
+        )
+
+        sd_output = gr.Gallery().style(grid=2, height="auto")
+
+        gr.Markdown("""
+            ### ๐ About the models
+            <p style='font-size: 1em;line-height: 1.5em;'>
+            <strong>Whisper</strong> is a general-purpose speech recognition model.<br /><br />
+            It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. <br />
+            ─
+            </p>
+            <p style='font-size: 1em;line-height: 1.5em;'>
+            <strong>Stable Diffusion</strong> is a state-of-the-art text-to-image model that generates images from text.
+            </p>
+            <div id="notice">
+                <div>
+                LICENSE
+                <p style='font-size: 0.8em;'>
+                The model is licensed with a <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">CreativeML Open RAIL-M</a> license.</p>
+                <p style='font-size: 0.8em;'>
+                The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license.</p>
+                <p style='font-size: 0.8em;'>
+                The license forbids you from sharing any content that violates any laws, produces any harm to a person, disseminates any personal information that would be meant for harm, spreads misinformation, or targets vulnerable groups.</p>
+                <p style='font-size: 0.8em;'>
+                For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank">read the license</a>.
+                </p>
+                </div>
+                <div>
+                Biases and content acknowledgment
+                <p style='font-size: 0.8em;'>
+                Despite how impressive being able to turn text into image is, beware of the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence.</p>
+                <p style='font-size: 0.8em;'>
+                The model was trained on the <a href="https://laion.ai/blog/laion-5b/" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes.</p>
+                <p style='font-size: 0.8em;'> You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" target="_blank">model card</a>.
+                </p>
+                </div>
+            </div>
+
+            """, elem_id="about")
+
+        audio_r_translate.click(translate,
+                                inputs=record_input,
+                                outputs=[
+                                    language_detected_output,
+                                    transcripted_output,
+                                    translated_output
+                                ])
+
+        audio_u_translate.click(translate,
+                                inputs=upload_input,
+                                outputs=[
+                                    language_detected_output,
+                                    transcripted_output,
+                                    translated_output
+                                ])
+
+        audio_r_direct_sd.click(magic_whisper_to_sd,
+                                inputs=[
+                                    record_input,
+                                    guidance_scale,
+                                    nb_iterations,
+                                    seed
+                                ],
+                                outputs=[
+                                    language_detected_output,
+                                    transcripted_output,
+                                    translated_output,
+                                    sd_output
+                                ])
+
+        audio_u_direct_sd.click(magic_whisper_to_sd,
+                                inputs=[
+                                    upload_input,
+                                    guidance_scale,
+                                    nb_iterations,
+                                    seed
+                                ],
+                                outputs=[
+                                    language_detected_output,
+                                    transcripted_output,
+                                    translated_output,
+                                    sd_output
+                                ])
+
+        diffuse_btn.click(get_images,
+                          inputs=[
+                              translated_output
+                          ],
+                          outputs=sd_output
+                          )
+        gr.HTML('''
+            <div class="footer">
+                <p>Whisper by <a href="https://github.com/openai/whisper" target="_blank">OpenAI</a> - Stable Diffusion by <a href="https://huggingface.co/CompVis" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" target="_blank">Stability AI</a>
+                </p>
+            </div>
+        ''')
+
+
+if __name__ == "__main__":
+    demo.queue(max_size=32, concurrency_count=20).launch()
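Note: the click handlers above wire the UI to `translate`, `magic_whisper_to_sd` and `get_images`. Below is a minimal, hedged sketch of those same two paths called directly in Python, outside the Gradio UI; `my_recording.wav` is a hypothetical local file, ffmpeg must be available for `whisper.load_audio`, and `get_images` needs network access because it forwards the prompt to the public stabilityai/stable-diffusion Space.

```python
# Hedged sketch: exercising the functions defined in app.py directly.
# Assumes the definitions above are importable and that "my_recording.wav"
# is a hypothetical local audio file.

# Path 1: check the Whisper output first, then diffuse the translated prompt.
lang_flag, transcript, prompt = translate("my_recording.wav")
images = get_images(prompt)  # remote call to the Stable Diffusion Space

# Path 2: the one-shot "Magic Whisper > SD" button does both steps at once.
_, _, prompt, images = magic_whisper_to_sd(
    "my_recording.wav", guidance_scale=7, nb_iterations=25, seed=42
)
```

As written, the guidance scale, step count and seed are accepted by `magic_whisper_to_sd` but not forwarded to the remote Space, which only receives the prompt.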
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+#torch
+scipy
+ftfy
+#transformers
+emoji-country-flag
+#git+https://github.com/LambdaLabsML/lambda-diffusers.git#egg=lambda-diffusers
+git+https://github.com/openai/whisper.git
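For a local run, a quick way to confirm these dependencies resolved is to try the imports app.py actually uses (emoji-country-flag provides the `flag` module; gradio is supplied by the Space runtime per the README's `sdk: gradio`, so needing to install it separately for a local run is an assumption):

```python
# Hedged sketch: check that the modules app.py imports are available.
import importlib

for name in ("scipy", "ftfy", "flag", "whisper", "gradio"):
    try:
        importlib.import_module(name)
        print(f"{name}: OK")
    except ImportError as exc:
        print(f"{name}: missing ({exc})")
```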
unsafe.png
ADDED
(binary image file)