Spaces: Running on Zero
Update app.py
Browse files
app.py
CHANGED
@@ -117,9 +117,9 @@ def init_pipe():
117          controlnet_transformer,
118      )
119
120 -
121 -
122 -
123
124      return pipe
125
@@ -131,10 +131,9 @@ def inference(source_images,
131          h, w, random_seed)->List[PIL.Image.Image]:
132      torch.manual_seed(random_seed)
133
134 -    pipe.to(DEVICE)
135 -
136 -
137 -    # pipe.controlnet_transformer.to(DEVICE)
138
139      source_pixel_values = source_images/127.5 - 1.0
140      source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
@@ -172,13 +171,13 @@ def inference(source_images,
172          height = h,
173          width = w,
174          num_frames = f,
175 -        num_inference_steps =          [line truncated in page extraction]
176          interval = 6,
177          guidance_scale = guidance_scale,
178          generator = torch.Generator(device=DEVICE).manual_seed(random_seed)
179      ).frames[0]
180      b=time.perf_counter()
181 -    print(f"Denoise                   [line truncated in page extraction]
182
183      return video
184
117          controlnet_transformer,
118      )
119
120 +    pipe.vae.enable_slicing()
121 +    pipe.vae.enable_tiling()
122 +    pipe.enable_model_cpu_offload()
123
124      return pipe
125
131          h, w, random_seed)->List[PIL.Image.Image]:
132      torch.manual_seed(random_seed)
133
134 +    pipe.vae.to(DEVICE)
135 +    pipe.transformer.to(DEVICE)
136 +    pipe.controlnet_transformer.to(DEVICE)
137
138      source_pixel_values = source_images/127.5 - 1.0
139      source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
171          height = h,
172          width = w,
173          num_frames = f,
174 +        num_inference_steps = 20,
175          interval = 6,
176          guidance_scale = guidance_scale,
177          generator = torch.Generator(device=DEVICE).manual_seed(random_seed)
178      ).frames[0]
179      b=time.perf_counter()
180 +    print(f"Denoise 20 steps in {b-a}s")
181
182      return video
183