Yaron Koresh committed on
Commit
3b44ab7
·
verified ·
1 Parent(s): 6294ba8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -9
app.py CHANGED
@@ -25,7 +25,7 @@ from lxml.html import fromstring
25
  from diffusers.utils import export_to_gif, load_image
26
  from huggingface_hub import hf_hub_download
27
  from safetensors.torch import load_file, save_file
28
- from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
29
  from functools import partial
30
 
31
  # logging
@@ -52,7 +52,7 @@ device = "cuda"
52
  #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
53
  #base = "emilianJR/epiCRealism"
54
  base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
55
- vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
56
  #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
57
  adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
58
 
@@ -116,14 +116,11 @@ function custom(){
116
 
117
  # torch pipe
118
 
119
- pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
120
- pipe.scheduler = DDIMScheduler(
121
- clip_sample=False,
122
- beta_start=0.00085,
123
- beta_end=0.012,
124
  beta_schedule="linear",
125
- timestep_spacing="trailing",
126
- steps_offset=1
127
  )
128
  #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
129
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
 
25
  from diffusers.utils import export_to_gif, load_image
26
  from huggingface_hub import hf_hub_download
27
  from safetensors.torch import load_file, save_file
28
+ from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerAncestralDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
29
  from functools import partial
30
 
31
  # logging
 
52
  #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
53
  #base = "emilianJR/epiCRealism"
54
  base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
55
+ #vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
56
  #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
57
  adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
58
 
 
116
 
117
  # torch pipe
118
 
119
+ pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
120
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
121
+ base,
122
+ subfolder="scheduler",
 
123
  beta_schedule="linear",
 
 
124
  )
125
  #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
126
  pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")