Spaces:
Running
Running
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -31,7 +31,17 @@ from safetensors.torch import load_file, save_file
|
|
31 |
from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
|
32 |
#import jax
|
33 |
#import jax.numpy as jnp
|
34 |
-
from numba import jit
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
|
36 |
# logging
|
37 |
|
@@ -117,7 +127,12 @@ function custom(){
|
|
117 |
|
118 |
# functionality
|
119 |
|
120 |
-
@
|
|
|
|
|
|
|
|
|
|
|
121 |
def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
|
122 |
if dry_run:
|
123 |
print(f"--> {cmd}")
|
@@ -132,7 +147,12 @@ def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False
|
|
132 |
|
133 |
return result
|
134 |
|
135 |
-
@
|
|
|
|
|
|
|
|
|
|
|
136 |
def translate(text,lang):
|
137 |
if text == None or lang == None:
|
138 |
return ""
|
@@ -174,13 +194,23 @@ def translate(text,lang):
|
|
174 |
print(ret)
|
175 |
return ret
|
176 |
|
177 |
-
@
|
|
|
|
|
|
|
|
|
|
|
178 |
def generate_random_string(length):
|
179 |
characters = string.ascii_letters + string.digits
|
180 |
return ''.join(random.choice(characters) for _ in range(length))
|
181 |
|
182 |
-
|
183 |
-
@
|
|
|
|
|
|
|
|
|
|
|
184 |
def Piper(image,positive,negative,motion):
|
185 |
global last_motion
|
186 |
global ip_loaded
|
@@ -217,7 +247,12 @@ def Piper(image,positive,negative,motion):
|
|
217 |
num_frames=(fps*time)
|
218 |
)
|
219 |
|
220 |
-
@
|
|
|
|
|
|
|
|
|
|
|
221 |
def infer(pm):
|
222 |
print("infer: started")
|
223 |
|
@@ -239,7 +274,12 @@ def infer(pm):
|
|
239 |
export_to_gif(out.frames[0],name,fps=fps)
|
240 |
return name
|
241 |
|
242 |
-
@
|
|
|
|
|
|
|
|
|
|
|
243 |
def handle(i,m,p1,p2,*result):
|
244 |
p1_en = translate(p1,"english")
|
245 |
p2_en = translate(p2,"english")
|
@@ -254,7 +294,12 @@ def handle(i,m,p1,p2,*result):
|
|
254 |
ret.append(infer,pm)
|
255 |
return ret
|
256 |
|
257 |
-
@
|
|
|
|
|
|
|
|
|
|
|
258 |
def ui():
|
259 |
with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
|
260 |
with gr.Column(elem_id="col-container"):
|
@@ -308,7 +353,12 @@ def ui():
|
|
308 |
)
|
309 |
demo.queue().launch()
|
310 |
|
311 |
-
@
|
|
|
|
|
|
|
|
|
|
|
312 |
def pre():
|
313 |
pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
|
314 |
pipe.scheduler = DDIMScheduler(
|
|
|
31 |
from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
|
32 |
#import jax
|
33 |
#import jax.numpy as jnp
|
34 |
+
from numba import njit as cpu1, jit as cpu2
|
35 |
+
from numba.cuda import njit as cuda1, jit as cuda2
|
36 |
+
|
37 |
+
# optimization:
|
38 |
+
|
39 |
+
# @cuda1(cache=True)
|
40 |
+
# @cuda2(cache=True)
|
41 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
42 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
43 |
+
# @cpu1(cache=True)
|
44 |
+
# @cpu2(cache=True)
|
45 |
|
46 |
# logging
|
47 |
|
|
|
127 |
|
128 |
# functionality
|
129 |
|
130 |
+
@cuda1(cache=True)
|
131 |
+
# @cuda2(cache=True)
|
132 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
133 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
134 |
+
# @cpu1(cache=True)
|
135 |
+
# @cpu2(cache=True)
|
136 |
def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
|
137 |
if dry_run:
|
138 |
print(f"--> {cmd}")
|
|
|
147 |
|
148 |
return result
|
149 |
|
150 |
+
@cuda1(cache=True)
|
151 |
+
# @cuda2(cache=True)
|
152 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
153 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
154 |
+
# @cpu1(cache=True)
|
155 |
+
# @cpu2(cache=True)
|
156 |
def translate(text,lang):
|
157 |
if text == None or lang == None:
|
158 |
return ""
|
|
|
194 |
print(ret)
|
195 |
return ret
|
196 |
|
197 |
+
@cuda1(cache=True)
|
198 |
+
# @cuda2(cache=True)
|
199 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
200 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
201 |
+
# @cpu1(cache=True)
|
202 |
+
# @cpu2(cache=True)
|
203 |
def generate_random_string(length):
    """Return a random alphanumeric string of the given length.

    Characters are drawn uniformly (with replacement) from ASCII
    letters and digits via `random.choice`.  Note: `random` is not
    cryptographically secure — fine for filenames, not for tokens.
    """
    alphabet = string.ascii_letters + string.digits
    chars = []
    for _ in range(length):
        chars.append(random.choice(alphabet))
    return "".join(chars)
|
206 |
|
207 |
+
@cuda1(cache=True)
|
208 |
+
# @cuda2(cache=True)
|
209 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
210 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
211 |
+
# @cpu1(cache=True)
|
212 |
+
# @cpu2(cache=True)
|
213 |
+
@spaces.GPU(duration=65)
|
214 |
def Piper(image,positive,negative,motion):
|
215 |
global last_motion
|
216 |
global ip_loaded
|
|
|
247 |
num_frames=(fps*time)
|
248 |
)
|
249 |
|
250 |
+
@cuda1(cache=True)
|
251 |
+
# @cuda2(cache=True)
|
252 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
253 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
254 |
+
# @cpu1(cache=True)
|
255 |
+
# @cpu2(cache=True)
|
256 |
def infer(pm):
|
257 |
print("infer: started")
|
258 |
|
|
|
274 |
export_to_gif(out.frames[0],name,fps=fps)
|
275 |
return name
|
276 |
|
277 |
+
@cuda1(cache=True)
|
278 |
+
# @cuda2(cache=True)
|
279 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
280 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
281 |
+
# @cpu1(cache=True)
|
282 |
+
# @cpu2(cache=True)
|
283 |
def handle(i,m,p1,p2,*result):
|
284 |
p1_en = translate(p1,"english")
|
285 |
p2_en = translate(p2,"english")
|
|
|
294 |
ret.append(infer,pm)
|
295 |
return ret
|
296 |
|
297 |
+
@cuda1(cache=True)
|
298 |
+
# @cuda2(cache=True)
|
299 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
300 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
301 |
+
# @cpu1(cache=True)
|
302 |
+
# @cpu2(cache=True)
|
303 |
def ui():
|
304 |
with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
|
305 |
with gr.Column(elem_id="col-container"):
|
|
|
353 |
)
|
354 |
demo.queue().launch()
|
355 |
|
356 |
+
@cuda1(cache=True)
|
357 |
+
# @cuda2(cache=True)
|
358 |
+
# @cpu1(cache=True,nopython=True,parallel=True)
|
359 |
+
# @cpu2(cache=True,nopython=True,parallel=True)
|
360 |
+
# @cpu1(cache=True)
|
361 |
+
# @cpu2(cache=True)
|
362 |
def pre():
|
363 |
pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
|
364 |
pipe.scheduler = DDIMScheduler(
|