Yaron Koresh committed
Commit bb70c22 · verified · 1 Parent(s): 37cafa5

Update app.py

Files changed (1):
  app.py +33 -27
app.py CHANGED
@@ -22,7 +22,7 @@ formatter = logging.Formatter('\n >>> [%(levelname)s] %(asctime)s %(name)s: %(me
 handler2.setFormatter(formatter)
 root.addHandler(handler2)
 
-def cmd(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
+def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
     if dry_run:
         print(f"--> {cmd}")
         result = 1
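The hunk above shows only the head of the helper; the body that actually executes the command is elided from the diff. Renaming `cmd` to `run` also stops the function's first parameter from shadowing the function's own name. A minimal sketch of how such a helper is commonly written with `subprocess` (the `subprocess.run` call and the failure message are assumptions, not the file's actual body):

import subprocess

def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
    # Dry-run mode only echoes the command, mirroring the print() above.
    if dry_run:
        print(f"--> {cmd}")
        return 1
    # Assumption: commands arrive as shell strings like "pip install -r req.txt".
    result = subprocess.run(cmd, shell=True, capture_output=capture_output,
                            env=env, text=True)
    if assert_success:
        assert result.returncode == 0, f"command failed: {cmd}"
    return result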
@@ -36,28 +36,34 @@ def cmd(cmd, assert_success=False, capture_output=False, env=None, dry_run=False
 
     return result
 
-cmd("apt install python3-mpi4py")
-cmd("pip install -r req.txt")
+run("apt install python3-mpi4py")
+run("pip install -r req.txt")
 
-import spaces
-import torch
-import gradio as gr
-import numpy as np
-from lxml.html import fromstring
-#from transformers import pipeline
-from torch import multiprocessing as mp, nn
-#from torch.multiprocessing import Pool
-#from pathos.multiprocessing import ProcessPool as Pool
-#from pathos.threading import ThreadPool as Pool
-#from diffusers.pipelines.flux import FluxPipeline
-from diffusers.utils import export_to_gif, load_image
-from diffusers.models.modeling_utils import ModelMixin
-from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file, save_file
-from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
-#import jax
-#import jax.numpy as jnp
-from pyina.launchers import TorqueMpiPool as Pool
+def deps():
+    try:
+        import spaces
+        import torch
+        import gradio as gr
+        import numpy as np
+        from lxml.html import fromstring
+        #from transformers import pipeline
+        from torch import multiprocessing as mp, nn
+        #from torch.multiprocessing import Pool
+        #from pathos.multiprocessing import ProcessPool as Pool
+        #from pathos.threading import ThreadPool as Pool
+        #from diffusers.pipelines.flux import FluxPipeline
+        from diffusers.utils import export_to_gif, load_image
+        from diffusers.models.modeling_utils import ModelMixin
+        from huggingface_hub import hf_hub_download
+        from safetensors.torch import load_file, save_file
+        from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
+        #import jax
+        #import jax.numpy as jnp
+        from pyina.launchers import TorqueMpiPool as Pool
+    except:
+        pass
+
+deps()
 
 last_motion=None
 dtype = torch.float16
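Note that an `import` inside a function binds names only in that function's local scope, so once `deps()` returns, module-level code such as the `dtype = torch.float16` context line above will raise `NameError: name 'torch' is not defined`; the bare `except: pass` additionally hides any failed install. A sketch of one way to keep the lazy-import idea while still exposing the modules at module scope (the `importlib` loop is an illustration, not what app.py does):

import importlib

def deps():
    # A plain `import torch` inside a function binds only a local name,
    # so publish each module into the module's globals instead.
    for alias, modname in (("torch", "torch"), ("gr", "gradio"), ("np", "numpy")):
        try:
            globals()[alias] = importlib.import_module(modname)
        except ImportError as exc:
            # Report instead of a bare `except: pass`, which hides failures.
            print(f"missing dependency {modname}: {exc}")

deps()
# After deps(), names like torch/gr/np are visible at module scope
# (assuming the packages are installed).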
@@ -67,7 +73,7 @@ device = "cuda"
 #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 base = "emilianJR/epiCRealism"
 #base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
-#vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
+vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
 #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
 
@@ -76,7 +82,7 @@ fps=10
 time=1
 width=384
 height=768
-step = 25
+step=40
 accu=10
 
 css="""
@@ -223,7 +229,7 @@ def infer(pm):
     export_to_gif(out.frames[0],name,fps=fps)
     return name
 
-def run(i,m,p1,p2,*result):
+def main(i,m,p1,p2,*result):
 
     p1_en = translate(p1,"english")
     p2_en = translate(p2,"english")
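The next hunk's context shows `main` fanning the work out with `pyina.launchers.TorqueMpiPool`, which submits MPI jobs through a Torque/PBS batch scheduler; without such a scheduler on the host, the pool has nothing to submit to. As a rough local stand-in for the same `pool.map(infer, arr)` fan-out, a standard `multiprocessing.Pool` sketch (the placeholder `infer` here is illustrative, not the real one in app.py):

from multiprocessing import Pool

def infer(pm):
    # Placeholder for app.py's real infer(pm), which renders one GIF.
    return pm

def fan_out(arr):
    # Same map-style fan-out as the Torque pool, but in-process.
    with Pool(processes=2) as pool:
        return pool.map(infer, arr)

if __name__ == "__main__":
    print(fan_out([0, 1, 2]))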
@@ -235,7 +241,7 @@ def run(i,m,p1,p2,*result):
     with Pool(f'{ ln }:ppn=2', queue='productionQ', timelimit='5:00:00', workdir='.') as pool:
         return pool.map(infer,arr)
 
-pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
+pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
 pipe.scheduler = DDIMScheduler(
     clip_sample=False,
     beta_start=0.00085,
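With the sd-vae-ft-mse VAE now passed explicitly via `vae=vae`, decoding no longer falls back to the base checkpoint's VAE. A minimal end-to-end sketch of how the assembled pipeline is typically invoked (keyword names follow diffusers' `AnimateDiffPipeline.__call__`; the prompt and the mapping of `accu` to `guidance_scale` are assumptions):

import torch
from diffusers import AnimateDiffPipeline, AutoencoderKL, MotionAdapter
from diffusers.utils import export_to_gif

dtype, device = torch.float16, "cuda"
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype)
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", vae=vae,
                                           motion_adapter=adapter, torch_dtype=dtype).to(device)

fps, time, step, accu = 10, 1, 40, 10    # values from app.py above
out = pipe(
    prompt="a photorealistic portrait",  # assumption: any text prompt
    num_frames=fps * time,               # 10 fps for 1 s -> 10 frames
    num_inference_steps=step,            # raised from 25 to 40 by this commit
    guidance_scale=accu,                 # assumption: accu is the CFG scale
    width=384,
    height=768,
)
export_to_gif(out.frames[0], "movie.gif", fps=fps)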
@@ -299,6 +305,6 @@ with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
 
     gr.on(
         triggers=[run_button.click, prompt.submit, prompt2.submit],
-        fn=run,inputs=[img,motion,prompt,prompt2,*result],outputs=result
+        fn=main,inputs=[img,motion,prompt,prompt2,*result],outputs=result
     )
 demo.queue().launch()
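A plausible reading of the paired renames in this commit: once the shell helper became `run`, the old UI handler `run(i,m,p1,p2,*result)` would have silently rebound the same name, so the handler becomes `main` and the `gr.on` wiring follows. A toy demonstration of that collision (hypothetical definitions, not app.py's code):

def run(cmd):
    # Shell-command helper: first binding of the name `run`.
    print(f"--> {cmd}")

def run(i, m, p1, p2, *result):
    # A second `def run` silently replaces the helper above.
    return result

try:
    run("pip install -r req.txt")
except TypeError as exc:
    print(exc)  # the helper was overwritten by the handler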
 