Spaces:
Sleeping
Sleeping
Yaron Koresh
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -75,7 +75,7 @@ def generate_random_string(length):
|
|
75 |
return ''.join(random.choice(characters) for _ in range(length))
|
76 |
|
77 |
@spaces.GPU(duration=75)
|
78 |
-
def Piper(name,positive_prompt,motion):
|
79 |
global step
|
80 |
global fps
|
81 |
global time
|
@@ -96,12 +96,11 @@ def Piper(name,positive_prompt,motion):
|
|
96 |
|
97 |
out = pipe(
|
98 |
positive_prompt,
|
|
|
99 |
height=1024,
|
100 |
width=576,
|
101 |
num_inference_steps=step,
|
102 |
guidance_scale=1,
|
103 |
-
callback=progress_callback,
|
104 |
-
callback_step=1,
|
105 |
num_frames=(fps*time)
|
106 |
)
|
107 |
|
@@ -156,14 +155,12 @@ def infer(pm):
|
|
156 |
name = generate_random_string(12)+".png"
|
157 |
neg = pm["n"]
|
158 |
|
159 |
-
if neg != "":
|
160 |
-
neg=f' (((({neg}))))'
|
161 |
_do = ['beautiful', 'playful', 'photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable coloring', 'rough texture', 'best quality', 'focused']
|
162 |
if p1 != "":
|
163 |
_do.append(f'{p1}')
|
164 |
posi = " ".join(_do)+neg
|
165 |
|
166 |
-
return Piper(name,posi,pm["m"])
|
167 |
|
168 |
def run(m,p1,p2,*result):
|
169 |
|
@@ -209,15 +206,24 @@ def main():
|
|
209 |
repo="stabilityai/sd-vae-ft-mse-original"
|
210 |
ckpt="vae-ft-mse-840000-ema-pruned.safetensors"
|
211 |
vae = load_file(hf_hub_download(repo, ckpt), device=device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
212 |
|
|
|
|
|
|
|
|
|
213 |
repo = "ByteDance/AnimateDiff-Lightning"
|
214 |
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
|
215 |
-
|
216 |
-
#base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
|
217 |
-
#base = "emilianJR/epiCRealism"
|
218 |
base = "black-forest-labs/FLUX.1-schnell"
|
219 |
|
220 |
-
pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype, token=os.getenv("hf_token")).to(device)
|
221 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
|
222 |
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
|
223 |
pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
|
|
|
75 |
return ''.join(random.choice(characters) for _ in range(length))
|
76 |
|
77 |
@spaces.GPU(duration=75)
|
78 |
+
def Piper(name,positive_prompt,negative,motion):
|
79 |
global step
|
80 |
global fps
|
81 |
global time
|
|
|
96 |
|
97 |
out = pipe(
|
98 |
positive_prompt,
|
99 |
+
negative_prompt=negative,
|
100 |
height=1024,
|
101 |
width=576,
|
102 |
num_inference_steps=step,
|
103 |
guidance_scale=1,
|
|
|
|
|
104 |
num_frames=(fps*time)
|
105 |
)
|
106 |
|
|
|
155 |
name = generate_random_string(12)+".png"
|
156 |
neg = pm["n"]
|
157 |
|
|
|
|
|
158 |
_do = ['beautiful', 'playful', 'photographed', 'realistic', 'dynamic poze', 'deep field', 'reasonable coloring', 'rough texture', 'best quality', 'focused']
|
159 |
if p1 != "":
|
160 |
_do.append(f'{p1}')
|
161 |
posi = " ".join(_do)+neg
|
162 |
|
163 |
+
return Piper(name,posi,neg,pm["m"])
|
164 |
|
165 |
def run(m,p1,p2,*result):
|
166 |
|
|
|
206 |
repo="stabilityai/sd-vae-ft-mse-original"
|
207 |
ckpt="vae-ft-mse-840000-ema-pruned.safetensors"
|
208 |
vae = load_file(hf_hub_download(repo, ckpt), device=device)
|
209 |
+
|
210 |
+
repo="ByteDance/SDXL-Lightning"
|
211 |
+
ckpt=f"sdxl_lightning_{step}step_unet.safetensors"
|
212 |
+
unet = load_file(hf_hub_download(repo, ckpt), device=device)
|
213 |
+
|
214 |
+
repo = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
|
215 |
+
ckpt="feature_extractor/preprocessor_config.json"
|
216 |
+
fext = load_file(hf_hub_download(repo, ckpt), device=device)
|
217 |
|
218 |
+
#repo = "emilianJR/epiCRealism"
|
219 |
+
#ckpt = "unet/diffusion_pytorch_model.safetensors"
|
220 |
+
#unet = load_file(hf_hub_download(repo, ckpt), device=device)
|
221 |
+
|
222 |
repo = "ByteDance/AnimateDiff-Lightning"
|
223 |
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
|
|
|
|
|
|
|
224 |
base = "black-forest-labs/FLUX.1-schnell"
|
225 |
|
226 |
+
pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, feature_extractor=fext, unet=unet, torch_dtype=dtype, token=os.getenv("hf_token")).to(device)
|
227 |
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
|
228 |
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
|
229 |
pipe.enable_free_init(method="butterworth", use_fast_sampling=False)
|