Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -10,6 +10,7 @@ from diffusers.utils import export_to_video, load_video
|
|
10 |
from torchvision import transforms
|
11 |
import random
|
12 |
from controlnet_aux import CannyDetector
|
|
|
13 |
|
14 |
dtype = torch.bfloat16
|
15 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
@@ -21,6 +22,7 @@ pipe_upsample.to(device)
|
|
21 |
pipeline.vae.enable_tiling()
|
22 |
|
23 |
canny_processor = CannyDetector()
|
|
|
24 |
|
25 |
CONTROL_LORAS = {
|
26 |
"canny": {
|
@@ -98,19 +100,19 @@ def process_video_for_canny(video):
|
|
98 |
canny_video = []
|
99 |
for frame in video:
|
100 |
# TODO: change resolution logic
|
101 |
-
canny_video.append(
|
102 |
|
103 |
return canny_video
|
104 |
|
105 |
def process_video_for_depth(video):
|
106 |
"""
|
107 |
Process video for depth control.
|
108 |
-
Placeholder function - will return video as-is for now.
|
109 |
-
TODO: Implement depth estimation processing
|
110 |
"""
|
111 |
print("Processing video for depth control...")
|
112 |
-
|
113 |
-
|
|
|
|
|
114 |
|
115 |
def process_video_for_pose(video):
|
116 |
"""
|
|
|
10 |
from torchvision import transforms
|
11 |
import random
|
12 |
from controlnet_aux import CannyDetector
|
13 |
+
from image_gen_aux import DepthPreprocessor
|
14 |
|
15 |
dtype = torch.bfloat16
|
16 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
22 |
pipeline.vae.enable_tiling()
|
23 |
|
24 |
canny_processor = CannyDetector()
|
25 |
+
depth_processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
|
26 |
|
27 |
CONTROL_LORAS = {
|
28 |
"canny": {
|
|
|
100 |
canny_video = []
|
101 |
for frame in video:
|
102 |
# TODO: change resolution logic
|
103 |
+
canny_video.append(canny_processor(frame, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024))
|
104 |
|
105 |
return canny_video
|
106 |
|
107 |
def process_video_for_depth(video):
|
108 |
"""
|
109 |
Process video for depth control.
|
|
|
|
|
110 |
"""
|
111 |
print("Processing video for depth control...")
|
112 |
+
depth_video = []
|
113 |
+
for frame in video:
|
114 |
+
depth_video.append(depth_processor(frame)[0].convert("RGB"))
|
115 |
+
return depth_video
|
116 |
|
117 |
def process_video_for_pose(video):
|
118 |
"""
|