PengWeixuanSZU committed · verified
Commit b867b89 · 1 Parent(s): ca68585

Update app.py

Files changed (1): app.py (+5, -2)
app.py CHANGED
@@ -64,7 +64,6 @@ def get_pipe_image_and_video_predictor():
     scheduler = UniPCMultistepScheduler.from_pretrained("./model/scheduler")
 
     pipe = Minimax_Remover_Pipeline(transformer=transformer, vae=vae, scheduler=scheduler)
-    pipe.to(device)
 
     sam2_checkpoint = "./SAM2-Video-Predictor/checkpoints/sam2_hiera_large.pt"
     config = "sam2_hiera_l.yaml"
@@ -244,7 +243,10 @@ def track_video(n_frames,video_state):
     video_state["origin_images"] = images
     images = np.array(images)
 
-    video_predictor_local=video_predictor.to("cuda")
+    sam2_checkpoint = "./SAM2-Video-Predictor/checkpoints/sam2_hiera_large.pt"
+    config = "sam2_hiera_l.yaml"
+    video_predictor_local = build_sam2_video_predictor(config, sam2_checkpoint, device="cuda")
+
     inference_state = video_predictor_local.init_state(images=images/255, device="cuda")
     video_state["inference_state"] = inference_state
 
@@ -279,6 +281,7 @@ def track_video(n_frames,video_state):
     output_frames.append(painted)
     video_state["masks"] =mask_frames
     video_file = f"/tmp/{time.time()}-{random.random()}-tracked_output.mp4"
+    print("line 281 done")
     clip = ImageSequenceClip(output_frames, fps=15)
     clip.write_videofile(video_file, codec='libx264', audio=False, verbose=False, logger=None)
     return video_file,video_state
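
For readers following the change, here is a minimal sketch of how the modified portion of track_video reads after this commit: the SAM2 video predictor is now built on CUDA inside the function instead of moving a shared video_predictor there with .to("cuda"). The build_sam2_video_predictor import path and the wrapper function name are assumptions for illustration only; the checkpoint path, config name, and the init_state call are taken directly from the diff.

import numpy as np
# Assumed import path; the Space's local SAM2-Video-Predictor checkout may expose this differently.
from sam2.build_sam import build_sam2_video_predictor

def track_video_snippet(images, video_state):
    # Hypothetical wrapper around just the lines touched by this commit.
    video_state["origin_images"] = images
    images = np.array(images)

    # Build a fresh predictor on CUDA for this call (replaces video_predictor.to("cuda")).
    sam2_checkpoint = "./SAM2-Video-Predictor/checkpoints/sam2_hiera_large.pt"
    config = "sam2_hiera_l.yaml"
    video_predictor_local = build_sam2_video_predictor(config, sam2_checkpoint, device="cuda")

    # Initialize tracking state from frames scaled to [0, 1].
    inference_state = video_predictor_local.init_state(images=images/255, device="cuda")
    video_state["inference_state"] = inference_state
    return video_state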