Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -188,8 +188,6 @@ def inference_and_return_video(dilation_iterations, num_inference_steps, video_s
|
|
188 |
|
189 |
images = np.array(images)
|
190 |
masks = np.array(masks)
|
191 |
-
print(f"line 191 images shape:{images.shape},masks shape:{masks.shape}")
|
192 |
-
#line 191 images shape:(1, 1024, 1820, 3),masks shape:(1, 1024, 1820), which should be (16, 1024, 1820, 3) and (16, 1024, 1820, 3)
|
193 |
img_tensor, mask_tensor = preprocess_for_removal(images, masks)
|
194 |
mask_tensor = mask_tensor[:,:,:,:1]
|
195 |
|
@@ -200,6 +198,7 @@ def inference_and_return_video(dilation_iterations, num_inference_steps, video_s
|
|
200 |
height = 832
|
201 |
width = 480
|
202 |
|
|
|
203 |
with torch.no_grad():
|
204 |
out = pipe(
|
205 |
images=img_tensor,
|
@@ -220,7 +219,7 @@ def inference_and_return_video(dilation_iterations, num_inference_steps, video_s
|
|
220 |
clip.write_videofile(video_file, codec='libx264', audio=False, verbose=False, logger=None)
|
221 |
return video_file
|
222 |
|
223 |
-
@spaces.GPU(duration=
|
224 |
def track_video(n_frames,video_state):
|
225 |
input_points = video_state["input_points"]
|
226 |
input_labels = video_state["input_labels"]
|
@@ -271,16 +270,11 @@ def track_video(n_frames,video_state):
|
|
271 |
mask += out_mask
|
272 |
mask = np.clip(mask, 0, 1)
|
273 |
mask = cv2.resize(mask, (W_, H_))
|
274 |
-
print(f"line 275 mask shape:{mask.shape}")
|
275 |
mask_frames.append(mask)
|
276 |
-
print(f"line 277 len(mask_frames)={len(mask_frames)}")
|
277 |
painted = (1 - mask * 0.5) * frame + mask * 0.5 * color
|
278 |
painted = np.uint8(np.clip(painted * 255, 0, 255))
|
279 |
output_frames.append(painted)
|
280 |
-
print(f"line 281 len(output_frames)={len(output_frames)}, painted shape:{painted.shape}")
|
281 |
video_state["masks"] =mask_frames
|
282 |
-
print(f'line 283 len video_state["masks"]:{len(video_state["masks"])}')
|
283 |
-
print(f'line 284 video_state["masks"][0].shape:{video_state["masks"][0].shape}')
|
284 |
video_file = f"/tmp/{time.time()}-{random.random()}-tracked_output.mp4"
|
285 |
clip = ImageSequenceClip(output_frames, fps=15)
|
286 |
clip.write_videofile(video_file, codec='libx264', audio=False, verbose=False, logger=None)
|
|
|
188 |
|
189 |
images = np.array(images)
|
190 |
masks = np.array(masks)
|
|
|
|
|
191 |
img_tensor, mask_tensor = preprocess_for_removal(images, masks)
|
192 |
mask_tensor = mask_tensor[:,:,:,:1]
|
193 |
|
|
|
198 |
height = 832
|
199 |
width = 480
|
200 |
|
201 |
+
pipe=pipe.to("cuda")
|
202 |
with torch.no_grad():
|
203 |
out = pipe(
|
204 |
images=img_tensor,
|
|
|
219 |
clip.write_videofile(video_file, codec='libx264', audio=False, verbose=False, logger=None)
|
220 |
return video_file
|
221 |
|
222 |
+
@spaces.GPU(duration=200)
|
223 |
def track_video(n_frames,video_state):
|
224 |
input_points = video_state["input_points"]
|
225 |
input_labels = video_state["input_labels"]
|
|
|
270 |
mask += out_mask
|
271 |
mask = np.clip(mask, 0, 1)
|
272 |
mask = cv2.resize(mask, (W_, H_))
|
|
|
273 |
mask_frames.append(mask)
|
|
|
274 |
painted = (1 - mask * 0.5) * frame + mask * 0.5 * color
|
275 |
painted = np.uint8(np.clip(painted * 255, 0, 255))
|
276 |
output_frames.append(painted)
|
|
|
277 |
video_state["masks"] =mask_frames
|
|
|
|
|
278 |
video_file = f"/tmp/{time.time()}-{random.random()}-tracked_output.mp4"
|
279 |
clip = ImageSequenceClip(output_frames, fps=15)
|
280 |
clip.write_videofile(video_file, codec='libx264', audio=False, verbose=False, logger=None)
|