linoyts (HF Staff) committed
Commit d5984e9 · verified · Parent: 949a551

Update app.py

Files changed (1): app.py (+16 −18)
app.py CHANGED
@@ -10,8 +10,8 @@ from diffusers.utils import export_to_video, load_video
 from torchvision import transforms
 import random
 from controlnet_aux import CannyDetector
-from image_gen_aux import DepthPreprocessor
-import mediapipe as mp
+# from image_gen_aux import DepthPreprocessor
+# import mediapipe as mp
 from PIL import Image
 import cv2
 
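Editor's note: the commit disables the depth and pose dependencies by commenting out their imports. An alternative pattern (not in the commit) is to guard the optional imports so the Space still boots when a dependency is missing; a minimal sketch:

# Sketch only, not part of the commit: guard the optional dependencies
# instead of commenting them out.
try:
    from image_gen_aux import DepthPreprocessor
except ImportError:
    DepthPreprocessor = None  # depth control unavailable

try:
    import mediapipe as mp
except ImportError:
    mp = None  # pose control unavailable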
@@ -28,12 +28,11 @@ pipe_upsample.to(device)
 pipeline.vae.enable_tiling()
 
 canny_processor = CannyDetector()
-depth_processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
 
 # Initialize MediaPipe pose estimation
-mp_drawing = mp.solutions.drawing_utils
-mp_drawing_styles = mp.solutions.drawing_styles
-mp_pose = mp.solutions.pose
+# mp_drawing = mp.solutions.drawing_utils
+# mp_drawing_styles = mp.solutions.drawing_styles
+# mp_pose = mp.solutions.pose
 
 CONTROL_LORAS = {
     "canny": {
@@ -53,6 +52,14 @@ CONTROL_LORAS = {
     }
 }
 
+# load canny lora
+pipeline.load_lora_weights(
+    CONTROL_LORAS["canny"]["repo"],
+    weight_name=CONTROL_LORAS["canny"]["weight_name"],
+    adapter_name=CONTROL_LORAS["canny"]["adapter_name"]
+)
+pipeline.set_adapters([lora_config["adapter_name"]], adapter_weights=[1.0])
+
 @spaces.GPU()
 def read_video(video) -> torch.Tensor:
     """
@@ -116,16 +123,6 @@ def process_video_for_canny(video):
 
     return canny_video
 
-@spaces.GPU()
-def process_video_for_depth(video):
-    """
-    Process video for depth control.
-    """
-    print("Processing video for depth control...")
-    depth_video = []
-    for frame in video:
-        depth_video.append(depth_processor(frame)[0].convert("RGB"))
-    return depth_video
 
 @spaces.GPU()
 def process_video_for_pose(video):
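Editor's note: the canny path that survives this commit relies on controlnet_aux's CannyDetector, called per frame. A minimal standalone sketch (the `frames` name and the thresholds are illustrative, not taken from app.py):

from controlnet_aux import CannyDetector

canny = CannyDetector()
# frames: a list of PIL.Image frames; the detector returns edge maps as PIL images
canny_frames = [canny(frame, low_threshold=50, high_threshold=200) for frame in frames]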
@@ -230,7 +227,7 @@ def generate_video(
     progress(0.05, desc="Loading control LoRA...")
 
     # Load the appropriate control LoRA and update state
-    updated_lora_state = load_control_lora(control_type, current_lora_state)
+    # updated_lora_state = load_control_lora(control_type, current_lora_state)
 
     # Loads video into a list of pil images
     video = load_video(reference_video)
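Editor's note: commenting out the assignment leaves `updated_lora_state` unbound; if generate_video still reads or returns that variable later (not visible in this hunk), the call would fail with a NameError. A minimal guard under that assumption:

# Assumption: updated_lora_state is still referenced later in generate_video.
# The canny LoRA is now loaded once at startup, so just carry the state through.
updated_lora_state = current_lora_state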
@@ -323,7 +320,7 @@
 with gr.Blocks() as demo:
     gr.Markdown(
         """
-        # LTX Video Control
+        # LTX Video Control Canny
         """
     )
 
@@ -350,6 +347,7 @@
             label="Control Type",
             choices=["canny", "depth", "pose"],
             value="canny",
+            visible=False,
             info="Choose the type of control guidance for video generation"
         )
 
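Editor's note: since only the canny LoRA is loaded at startup, the selector is hidden rather than removed. A gr.Radio with visible=False still passes its fixed value to event handlers, so downstream code that expects a control_type input keeps working. A standalone sketch (names are illustrative, not from the app):

import gradio as gr

with gr.Blocks() as demo:
    # hidden selector pinned to "canny"; its value is still sent to the handler
    control_type = gr.Radio(choices=["canny", "depth", "pose"], value="canny", visible=False)
    output = gr.Textbox(label="Result")
    run = gr.Button("Generate")
    run.click(lambda c: f"control type: {c}", inputs=control_type, outputs=output)

demo.launch()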
 
353