Yaron Koresh committed on
Commit
562b4d5
·
verified ·
1 Parent(s): 42c98a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -41,23 +41,26 @@ handler2.setFormatter(formatter)
41
  root.addHandler(handler2)
42
 
43
  last_motion=None
44
- fps=10
45
- time=1
46
- width=448
47
- height=448
48
- device = "cuda"
49
  dtype = torch.float16
50
  result=[]
51
- step = 15
52
- accu=2
53
  #repo = "ByteDance/AnimateDiff-Lightning"
54
  #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
55
- #base = "emilianJR/epiCRealism"
56
- base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
57
- vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
58
  #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
59
  adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
60
 
 
 
 
 
 
 
 
 
 
61
  css="""
62
  input, input::placeholder {
63
  text-align: center !important;
@@ -217,7 +220,7 @@ def run(i,m,p1,p2,*result):
217
 
218
  return out
219
 
220
- pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
221
  pipe.scheduler = DDIMScheduler(
222
  clip_sample=False,
223
  beta_start=0.00085,
@@ -229,7 +232,7 @@ pipe.scheduler = DDIMScheduler(
229
  #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
230
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
231
  pipe.enable_vae_slicing()
232
- pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
233
 
234
  mp.set_start_method("spawn", force=True)
235
 
 
41
  root.addHandler(handler2)
42
 
43
  last_motion=None
 
 
 
 
 
44
  dtype = torch.float16
45
  result=[]
46
+ device = "cuda"
 
47
  #repo = "ByteDance/AnimateDiff-Lightning"
48
  #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
49
+ base = "emilianJR/epiCRealism"
50
+ #base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
51
+ #vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
52
  #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
53
  adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
54
 
55
+ fasts=True
56
+ fps=15
57
+ time=20
58
+ width=320
59
+ height=640
60
+ step = 25
61
+ accu=4
62
+
63
+
64
  css="""
65
  input, input::placeholder {
66
  text-align: center !important;
 
220
 
221
  return out
222
 
223
+ pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
224
  pipe.scheduler = DDIMScheduler(
225
  clip_sample=False,
226
  beta_start=0.00085,
 
232
  #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
233
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
234
  pipe.enable_vae_slicing()
235
+ pipe.enable_free_init(method="butterworth", use_fast_sampling=fasts)
236
 
237
  mp.set_start_method("spawn", force=True)
238