Yaron Koresh committed
Commit 70f75dc · verified · 1 parent: 60f77fe

Update app.py

Files changed (1): app.py (+12, -11)
app.py CHANGED
@@ -23,22 +23,22 @@ import jax
 import jax.numpy as jnp
 
 last_motion=None
-fps=14
-time=1
-width=448
-height=448
+fps=20
+time=3
+width=576
+height=1024
 device = "cuda"
 dtype = torch.float16
 result=[]
-step = 30
-accu=7.5
-#repo = "ByteDance/AnimateDiff-Lightning"
-#ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+step = 2
+accu=1
+repo = "ByteDance/AnimateDiff-Lightning"
+ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 base = "emilianJR/epiCRealism"
 #base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
 #vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
 #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
+#adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
 
 css="""
 input, input::placeholder {
@@ -59,7 +59,7 @@ footer {
 max-width: 15cm;
 }
 .image-container {
-aspect-ratio: """+width+"/"+height+""" !important;
+aspect-ratio: """+str(width)+"/"+str(height)+""" !important;
 }
 .dropdown-arrow {
 display: none !important;
@@ -193,7 +193,7 @@ def run(i,m,p1,p2,*result):
 
     return out
 
-pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
+pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype).to(device)
 pipe.scheduler = DDIMScheduler(
     clip_sample=False,
     beta_start=0.00085,
@@ -202,6 +202,7 @@ pipe.scheduler = DDIMScheduler(
     timestep_spacing="trailing",
     steps_offset=1
 )
+pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
 pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
 pipe.enable_vae_slicing()
 pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
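Taken together, the hunks swap the stock AnimateDiff motion adapter sampled at 30 steps with guidance 7.5 for ByteDance's distilled AnimateDiff-Lightning checkpoint at 2 steps with guidance 1, raise the output to 576×1024 at 20 fps for 3 seconds, and cast width and height to str so the CSS aspect-ratio concatenation no longer raises a TypeError on str + int. For context, the AnimateDiff-Lightning model card loads the distilled weights into a MotionAdapter and hands that to the pipeline, rather than loading them into the base UNet with strict=False as this commit does. Below is a minimal sketch of that upstream pattern, assuming a diffusers version with AnimateDiff support; the prompt, resolution, and output path are illustrative, and the Euler scheduler follows the model card rather than this app's DDIM configuration:

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"
dtype = torch.float16
step = 2  # Lightning ships 1-, 2-, 4- and 8-step distilled checkpoints
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"

# Model-card pattern: the distilled weights populate a MotionAdapter,
# not the base UNet.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))

pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

# Illustrative call; this app presumably derives its frame count from
# fps * time in code outside this diff.
output = pipe(
    prompt="a girl smiling",       # illustrative prompt
    width=576, height=1024,        # the commit's new portrait resolution
    num_inference_steps=step,
    guidance_scale=1.0,            # distilled checkpoints expect guidance ~1 (accu=1 above)
)
export_to_gif(output.frames[0], "animation.gif")

With strict=False, load_state_dict silently drops any checkpoint keys that have no matching parameter in pipe.unet, so inspecting the missing_keys and unexpected_keys it returns is a cheap sanity check on the commit's direct-into-UNet approach.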