Salimshakeel committed
Commit 1ff63b5 · 1 Parent(s): 992f44d
Files changed (2)
  1. services/extractor.py +7 -0
  2. services/summarizer.py +7 -1
services/extractor.py CHANGED

@@ -47,6 +47,8 @@ def extract_frames(video_path):
     indices = []
     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
     # total_frames = 300 # TEMP
+    print(f"Total frames in video: {total_frames}")
+    print(f"Extracting frames at every {FRAME_RATE} frames...")
 
     for idx in tqdm(range(0, total_frames, FRAME_RATE)):
         cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
@@ -56,11 +58,16 @@ def extract_frames(video_path):
             frames.append(Image.fromarray(frame))
             indices.append(idx)
 
+    print(f"Indices of extracted frames: {indices}")
+    print(f"Total frames extracted: {len(frames)}")
+
     cap.release()
     return frames, indices
 
 def extract_features(frames):
     features = [transform(frame) for frame in frames]
     features = torch.stack(features).to(DEVICE)
+    print("Features before GoogleNet extraction:", features.shape)
     features = feature_extractor(features)
+    print("Features after GoogleNet extraction:", features.shape)
     return features
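For orientation, `extract_frames` seeks straight to every `FRAME_RATE`-th frame with `cap.set(cv2.CAP_PROP_POS_FRAMES, idx)` instead of decoding the video sequentially. A minimal self-contained sketch of that pattern follows; the file name, the `FRAME_RATE` value, and the BGR-to-RGB conversion are illustrative assumptions, not values taken from this repo:

import cv2
from PIL import Image

FRAME_RATE = 15                       # assumed sampling step; the real value is configured elsewhere
cap = cv2.VideoCapture("sample.mp4")  # placeholder path
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

frames, indices = [], []
for idx in range(0, total_frames, FRAME_RATE):
    cap.set(cv2.CAP_PROP_POS_FRAMES, idx)               # jump directly to frame idx
    ret, frame = cap.read()
    if ret:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes BGR; PIL expects RGB
        frames.append(Image.fromarray(frame))
        indices.append(idx)
cap.release()
print(f"Extracted {len(frames)} of {total_frames} frames")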
services/summarizer.py CHANGED

@@ -13,10 +13,16 @@ def get_scores(features):
     # features.dtype: torch.float32
     # features.device: cpu
     with torch.no_grad():
+        print("Features before model inference:", features.shape)
         scores, _ = model(features)
-    return scores.squeeze().cpu().numpy()
+        scores = scores.squeeze().cpu().numpy()
+        print("Features after model inference:", features.shape)
+    return scores
 
 def get_selected_indices(scores, picks, threshold=SCORE_THRESHOLD):
+    print("Threshold for selection:", threshold)
+    print("Scores:", scores.shape, scores)
+    print("Picks:", picks.shape, picks)
     return [picks[i] for i, score in enumerate(scores) if score >= threshold]
 
 import subprocess
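The refactor above also changes `get_scores` to return a NumPy array rather than chaining `.squeeze().cpu().numpy()` on the return line, so `get_selected_indices` receives a plain array of per-frame scores. A standalone sketch of that selection step with stand-in scores (the real model call needs the checkpoint, and `SCORE_THRESHOLD = 0.5` is an assumed value, not one from this repo):

SCORE_THRESHOLD = 0.5  # assumed default; the real value is defined in the service config

def get_selected_indices(scores, picks, threshold=SCORE_THRESHOLD):
    # keep the frame indices whose importance score clears the threshold
    return [picks[i] for i, score in enumerate(scores) if score >= threshold]

scores = [0.2, 0.7, 0.9, 0.4]  # stand-in for model output (one score per sampled frame)
picks = [0, 15, 30, 45]        # frame indices sampled every FRAME_RATE frames
print(get_selected_indices(scores, picks))  # -> [15, 30]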