goryhon committed on
Commit
9bbc634
·
verified ·
1 Parent(s): 0c224b8

Update web-demos/hugging_face/app.py

Browse files
Files changed (1) hide show
  1. web-demos/hugging_face/app.py +40 -40
web-demos/hugging_face/app.py CHANGED
@@ -92,48 +92,48 @@ def get_frames_from_video(video_input, video_state):
92
  frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
93
  status_ok = False
94
  else:
95
- while cap.isOpened():
96
- ret, frame = cap.read()
97
- if ret == True:
98
- # resize input image
99
- original_h, original_w = frame.shape[:2]
100
- frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
 
 
 
 
 
101
  else:
102
- break
103
- t = len(frames)
104
- if t > 0:
105
- print(f'Inp video shape: t_{t}, s_{original_h}x{original_w}')
106
- else:
107
- print(f'Inp video shape: t_{t}, no input video!!!')
108
- except (OSError, TypeError, ValueError, KeyError, SyntaxError) as e:
109
- status_ok = False
110
- print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
111
 
112
- # initialize video_state
113
- if frames[0].shape[0] > 720 or frames[0].shape[1] > 720:
114
- operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {original_w}x{original_h}, and the length of its longest edge exceeds 720 pixels. We may resize the input video during processing.)", "Normal")]
115
-
116
- video_state = {
117
- "user_name": user_name,
118
- "video_name": os.path.split(video_path)[-1],
119
- "origin_images": frames,
120
- "painted_images": frames.copy(),
121
- "masks": [np.zeros((original_h, original_w), np.uint8)]*len(frames),
122
- "logits": [None]*len(frames),
123
- "select_frame_number": 0,
124
- "fps": fps
125
- }
126
- video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(video_state["video_name"], round(video_state["fps"], 0), length, (original_w, original_h))
127
- model.samcontroler.sam_controler.reset_image()
128
- model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])
129
- return video_state, video_info, video_state["origin_images"][0], gr.update(visible=status_ok, maximum=len(frames), value=1), gr.update(visible=status_ok, maximum=len(frames), value=len(frames)), \
130
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
131
- gr.update(visible=status_ok), gr.update(visible=status_ok),\
132
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
133
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
134
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
135
- gr.update(visible=status_ok), gr.update(visible=status_ok, choices=[], value=[]), \
136
- gr.update(visible=True, value=operation_log), gr.update(visible=status_ok, value=operation_log)
137
 
138
  def select_template(image_selection_slider, video_state, interactive_state, mask_dropdown):
139
 
 
92
  frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
93
  status_ok = False
94
  else:
95
+ while cap.isOpened():
96
+ ret, frame = cap.read()
97
+ if ret == True:
98
+ # resize input image
99
+ original_h, original_w = frame.shape[:2]
100
+ frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
101
+ else:
102
+ break
103
+ t = len(frames)
104
+ if t > 0:
105
+ print(f'Inp video shape: t_{t}, s_{original_h}x{original_w}')
106
  else:
107
+ print(f'Inp video shape: t_{t}, no input video!!!')
108
+ except (OSError, TypeError, ValueError, KeyError, SyntaxError) as e:
109
+ status_ok = False
110
+ print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
111
+
112
+ # initialize video_state
113
+ if frames[0].shape[0] > 720 or frames[0].shape[1] > 720:
114
+ operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {original_w}x{original_h}, and the length of its longest edge exceeds 720 pixels. We may resize the input video during processing.)", "Normal")]
 
115
 
116
+ video_state = {
117
+ "user_name": user_name,
118
+ "video_name": os.path.split(video_path)[-1],
119
+ "origin_images": frames,
120
+ "painted_images": frames.copy(),
121
+ "masks": [np.zeros((original_h, original_w), np.uint8)]*len(frames),
122
+ "logits": [None]*len(frames),
123
+ "select_frame_number": 0,
124
+ "fps": fps
125
+ }
126
+ video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(video_state["video_name"], round(video_state["fps"], 0), length, (original_w, original_h))
127
+ model.samcontroler.sam_controler.reset_image()
128
+ model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])
129
+ return video_state, video_info, video_state["origin_images"][0], gr.update(visible=status_ok, maximum=len(frames), value=1), gr.update(visible=status_ok, maximum=len(frames), value=len(frames)), \
130
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
131
+ gr.update(visible=status_ok), gr.update(visible=status_ok),\
132
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
133
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
134
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
135
+ gr.update(visible=status_ok), gr.update(visible=status_ok, choices=[], value=[]), \
136
+ gr.update(visible=True, value=operation_log), gr.update(visible=status_ok, value=operation_log)
 
 
 
 
137
 
138
  def select_template(image_selection_slider, video_state, interactive_state, mask_dropdown):
139