Spaces:
Running
Running
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -28,7 +28,7 @@ time=2
|
|
28 |
device = "cuda"
|
29 |
dtype = torch.float16
|
30 |
result=[]
|
31 |
-
step =
|
32 |
|
33 |
css="""
|
34 |
input, input::placeholder {
|
@@ -120,7 +120,7 @@ def generate_random_string(length):
|
|
120 |
characters = string.ascii_letters + string.digits
|
121 |
return ''.join(random.choice(characters) for _ in range(length))
|
122 |
|
123 |
-
@spaces.GPU(duration=
|
124 |
def Piper(name,positive_prompt,negative,motion):
|
125 |
global step
|
126 |
global fps
|
@@ -142,7 +142,7 @@ def Piper(name,positive_prompt,negative,motion):
|
|
142 |
height=1024,
|
143 |
width=576,
|
144 |
num_inference_steps=step,
|
145 |
-
guidance_scale=
|
146 |
num_frames=(fps*time)
|
147 |
)
|
148 |
|
@@ -156,7 +156,7 @@ def infer(pm):
|
|
156 |
_do = ['beautiful', 'playful', 'photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable coloring', 'rough texture', 'best quality', 'focused']
|
157 |
if p1 != "":
|
158 |
_do.append(f'{p1}')
|
159 |
-
posi = " ".join(_do)
|
160 |
|
161 |
out = Piper(name,posi,neg,pm["m"])
|
162 |
export_to_gif(out.frames[0],name,fps=fps)
|
@@ -180,7 +180,7 @@ def run(m,p1,p2,*result):
|
|
180 |
|
181 |
return out
|
182 |
|
183 |
-
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3")
|
184 |
#vae = AutoencoderKL.from_single_file("https://huggingface.co/stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors")
|
185 |
#unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
|
186 |
|
@@ -189,7 +189,7 @@ ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
|
|
189 |
base = "emilianJR/epiCRealism"
|
190 |
#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
|
191 |
|
192 |
-
pipe = AnimateDiffPipeline.from_pretrained(base,
|
193 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
|
194 |
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
|
195 |
pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
|
|
|
28 |
device = "cuda"
|
29 |
dtype = torch.float16
|
30 |
result=[]
|
31 |
+
step = 8
|
32 |
|
33 |
css="""
|
34 |
input, input::placeholder {
|
|
|
120 |
characters = string.ascii_letters + string.digits
|
121 |
return ''.join(random.choice(characters) for _ in range(length))
|
122 |
|
123 |
+
@spaces.GPU(duration=85)
|
124 |
def Piper(name,positive_prompt,negative,motion):
|
125 |
global step
|
126 |
global fps
|
|
|
142 |
height=1024,
|
143 |
width=576,
|
144 |
num_inference_steps=step,
|
145 |
+
guidance_scale=7,
|
146 |
num_frames=(fps*time)
|
147 |
)
|
148 |
|
|
|
156 |
_do = ['beautiful', 'playful', 'photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable coloring', 'rough texture', 'best quality', 'focused']
|
157 |
if p1 != "":
|
158 |
_do.append(f'{p1}')
|
159 |
+
posi = " ".join(_do)
|
160 |
|
161 |
out = Piper(name,posi,neg,pm["m"])
|
162 |
export_to_gif(out.frames[0],name,fps=fps)
|
|
|
180 |
|
181 |
return out
|
182 |
|
183 |
+
#adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3")
|
184 |
#vae = AutoencoderKL.from_single_file("https://huggingface.co/stabilityai/sd-vae-ft-mse-original/vae-ft-mse-840000-ema-pruned.safetensors")
|
185 |
#unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
|
186 |
|
|
|
189 |
base = "emilianJR/epiCRealism"
|
190 |
#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
|
191 |
|
192 |
+
pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype, token=os.getenv("hf_token")).to(device)
|
193 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
|
194 |
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
|
195 |
pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
|