Yaron Koresh
committed on
Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ import gradio as gr
 import numpy as np
 from lxml.html import fromstring
 from pathos.threading import ThreadPool as Pool
-from diffusers import AnimateDiffSDXLPipeline, MotionAdapter, DDIMScheduler
+from diffusers import DiffusionPipeline, AnimateDiffSDXLPipeline, MotionAdapter, DDIMScheduler
 from diffusers.pipelines.flux import FluxPipeline
 from diffusers.utils import export_to_gif
 from huggingface_hub import hf_hub_download
@@ -24,6 +24,8 @@ ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 #base = "emilianJR/epiCRealism"
 base = "black-forest-labs/FLUX.1-dev"
 
+vid_pipe = DiffusionPipeline.from_pretrained(repo)
+
 adapter = MotionAdapter().to(device)
 adapter.load_state_dict(load_file(hf_hub_download(repo ,ckpt)))
 
@@ -34,16 +36,10 @@ scheduler = DDIMScheduler.from_pretrained(
     timestep_spacing="linspace",
     beta_schedule="linear",
     steps_offset=1,
-    token=os.getenv("hf_token")
 )
-flx = FluxPipeline.from_pretrained(
-    base,
-    torch_dtype=dtype,
-    token=os.getenv("hf_token")
-).to(device)
-pipe = AnimateDiffSDXLPipeline.from_pretrained(
+
+pipe = AnimateDiffPipeline(vid_pipe).from_pretrained(
     base,
-    unet=flx.components.unet,
     motion_adapter=adapter,
     scheduler=scheduler,
     torch_dtype=dtype,
@@ -53,6 +49,14 @@ pipe = AnimateDiffSDXLPipeline.from_pretrained(
 pipe.enable_vae_slicing()
 pipe.enable_vae_tiling()
 
+"""
+flx = FluxPipeline.from_pretrained(
+    base,
+    torch_dtype=dtype,
+    token=os.getenv("hf_token")
+).to(device)
+"""
+
 def translate(text,lang):
 
     if text == None or lang == None:
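For reference, the adapter and checkpoint wiring that these hunks rearrange (repo, ckpt, MotionAdapter, load_file(hf_hub_download(...))) follows the AnimateDiff-Lightning loading recipe. The sketch below is only an illustration of that documented pattern, not this Space's code: it assumes repo points at ByteDance/AnimateDiff-Lightning (where checkpoints with the animatediff_lightning_{step}step_diffusers.safetensors naming live), uses an SD 1.5 base such as the commented-out emilianJR/epiCRealism, and keeps the Euler scheduler settings recommended for those distilled weights; the prompt is an arbitrary placeholder.

# Minimal sketch of the AnimateDiff-Lightning loading pattern.
# Assumptions: repo/base values below are illustrative, not taken from this commit.
import torch
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler, MotionAdapter
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"
dtype = torch.float16

step = 4  # the Lightning checkpoints come in 1-, 2-, 4- and 8-step variants
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"

# Load the distilled motion weights into a bare MotionAdapter.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))

# Wrap the image base model with the motion adapter to get a text-to-video pipeline.
pipe = AnimateDiffPipeline.from_pretrained(
    base, motion_adapter=adapter, torch_dtype=dtype
).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

output = pipe(prompt="a rocket launching at sunrise", guidance_scale=1.0, num_inference_steps=step)
export_to_gif(output.frames[0], "animation.gif")

guidance_scale=1.0 and num_inference_steps=step match what the distilled checkpoints are trained for; a different base model or scheduler changes those assumptions.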
|