PengWeixuanSZU committed
Commit 83f7a0c · verified · 1 Parent(s): 095eb40

Update app.py

Files changed (1)
  1. app.py  +5 -2
app.py CHANGED
@@ -56,10 +56,9 @@ random_seed = 42
 video_length = 201
 W = 1024
 H = W
-#device = "cuda" if torch.cuda.is_available() else "cpu"
 
 def get_pipe_image_and_video_predictor():
-    device="cuda"
+    device="cpu"
     vae = AutoencoderKLWan.from_pretrained("./model/vae", torch_dtype=torch.float16)
     transformer = Transformer3DModel.from_pretrained("./model/transformer", torch_dtype=torch.float16)
     scheduler = UniPCMultistepScheduler.from_pretrained("./model/scheduler")
@@ -248,6 +247,10 @@ def track_video(n_frames, video_state):
     images = [cv2.resize(img, (W_, H_)) for img in images]
     video_state["origin_images"] = images
     images = np.array(images)
+
+    video_predictor = build_sam2_video_predictor(config, sam2_checkpoint, device="cuda")
+
+
     inference_state = video_predictor.init_state(images=images/255, device="cuda")
     video_state["inference_state"] = inference_state
 
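
Note: the change moves GPU placement into the per-request path. The pipeline components in get_pipe_image_and_video_predictor now load with device="cpu" at startup, while the SAM2 video predictor is built with device="cuda" inside track_video, right before it is used. Below is a minimal sketch of that lazy-construction pattern, not the app's actual code: the config and checkpoint paths are hypothetical placeholders (app.py supplies its own config and sam2_checkpoint, which this commit does not show), and the init_state(images=..., device=...) keyword form simply mirrors the call in the diff above.

import numpy as np
import torch
from sam2.build_sam import build_sam2_video_predictor

# Hypothetical placeholders for illustration only; the real values live in app.py.
SAM2_CONFIG = "configs/sam2.1/sam2.1_hiera_l.yaml"
SAM2_CHECKPOINT = "./model/sam2_checkpoint.pt"

def build_predictor_lazily(images: np.ndarray):
    # Pick the device at call time, echoing the guard this commit removes from
    # module level (#device = "cuda" if torch.cuda.is_available() else "cpu").
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Construct the SAM2 video predictor only when tracking is requested,
    # so nothing touches the GPU at import/startup time.
    video_predictor = build_sam2_video_predictor(SAM2_CONFIG, SAM2_CHECKPOINT, device=device)

    # Same frame normalization and init_state call as in the diff above; the
    # images=/device= keywords assume the predictor variant used by app.py.
    inference_state = video_predictor.init_state(images=images / 255, device=device)
    return video_predictor, inference_state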