Yaron Koresh committed
Commit 0fc6336 · verified · 1 parent: 3876fb2

Update app.py

Files changed (1): app.py (+10 -16)
app.py CHANGED
@@ -29,6 +29,13 @@ device = "cuda"
 dtype = torch.float16
 result=[]
 step = 2
+repo = "ByteDance/AnimateDiff-Lightning"
+ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+base = "emilianJR/epiCRealism"
+#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
+#adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3")
+#vae = AutoencoderKL.from_single_file("https://huggingface.co/stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors")
+#unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
 
 css="""
 input, input::placeholder {
@@ -122,12 +129,9 @@ def generate_random_string(length):
 
 @spaces.GPU(duration=65)
 def Piper(name,positive_prompt,negative,motion):
-    global step
-    global fps
-    global time
     global last_motion
-    global base
-    global device
+
+    pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
 
     if last_motion != motion:
         pipe.unload_lora_weights()
@@ -135,7 +139,7 @@ def Piper(name,positive_prompt,negative,motion):
         pipe.load_lora_weights(motion, adapter_name="motion")
         pipe.set_adapters(["motion"], [0.7])
         last_motion = motion
-
+
     return pipe(
         positive_prompt,
         negative_prompt=negative,
@@ -180,18 +184,8 @@ def run(m,p1,p2,*result):
 
     return out
 
-#adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3")
-#vae = AutoencoderKL.from_single_file("https://huggingface.co/stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors")
-#unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
-
-repo = "ByteDance/AnimateDiff-Lightning"
-ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-base = "emilianJR/epiCRealism"
-#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
-
 pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype).to(device)
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
-pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
 pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
 
 mp.set_start_method("spawn", force=True)
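In sum, the commit hoists the AnimateDiff-Lightning checkpoint selection to module scope and defers loading the distilled UNet weights until the GPU-decorated Piper call. Below is a minimal sketch of the resulting flow in app.py; the imports, the last_motion initialization, and the trailing pipe(...) arguments are assumptions filled in from context, not lines shown in this diff.

import multiprocessing as mp  # assumed: the `mp` alias used at the bottom of app.py
import torch
import spaces
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"
dtype = torch.float16
step = 2

# AnimateDiff-Lightning publishes one distilled UNet per step count
# (1/2/4/8), so `step` selects the checkpoint filename.
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"

last_motion = None  # assumed initialization; not visible in this diff

@spaces.GPU(duration=65)
def Piper(name, positive_prompt, negative, motion):
    global last_motion

    # New in this commit: the Lightning weights are (re)applied on every
    # GPU call instead of once at import time.
    pipe.unet.load_state_dict(
        load_file(hf_hub_download(repo, ckpt), device=device), strict=False
    )

    # Swap the motion LoRA only when the requested adapter changes.
    if last_motion != motion:
        pipe.unload_lora_weights()
        pipe.load_lora_weights(motion, adapter_name="motion")
        pipe.set_adapters(["motion"], [0.7])
        last_motion = motion

    return pipe(
        positive_prompt,
        negative_prompt=negative,
        # ...remaining generation arguments are truncated in the diff hunk
    )

# Module-level pipeline construction (it sits below Piper in the file; the
# global `pipe` is resolved when Piper is called). As in the source, no
# MotionAdapter is passed to from_pretrained here.
pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)
pipe.enable_free_init(method="butterworth", use_fast_sampling=False)

mp.set_start_method("spawn", force=True)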
 
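A plausible reading of the move: on ZeroGPU Spaces the GPU is guaranteed only inside @spaces.GPU-decorated calls, so performing the device="cuda" weight load inside Piper avoids touching the GPU at import time, and hf_hub_download caches the checkpoint locally, so repeat calls reload from disk rather than re-downloading. A hypothetical invocation, with placeholder values not taken from the Space:

out = Piper(
    "demo",                                   # name
    "a rocket lifting off, cinematic light",  # positive prompt
    "low quality, blurry",                    # negative prompt
    "guoyww/animatediff-motion-lora-zoom-in", # motion: a public AnimateDiff motion-LoRA repo id
)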