yunusajib committed
Commit c85c25e · verified · 1 Parent(s): ccbb42c

Update chart

Files changed (1): app.py +35 -24
app.py CHANGED
@@ -11,6 +11,7 @@ import torch.nn.functional as F
 from torchvision import transforms
 from facenet_pytorch import MTCNN
 import gradio as gr
+import seaborn as sns
 
 class EmotionModel(torch.nn.Module):
     def __init__(self):
@@ -43,9 +44,6 @@ class EmotionDetector:
         ])
         self.softmax = torch.nn.Softmax(dim=1)
 
-        # Load pre-trained weights here if available
-        # self.model.load_state_dict(torch.load("emotion_model.pt", map_location=self.device))
-
     def detect_emotions_video(self, video_path, sample_rate=30, max_size_mb=50):
         try:
             if video_path is None:
@@ -57,7 +55,6 @@ class EmotionDetector:
             cap = cv2.VideoCapture(video_path)
             fps = int(cap.get(cv2.CAP_PROP_FPS))
             frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-
             if frame_count == 0:
                 return None, "Invalid video file"
 
@@ -79,7 +76,7 @@ class EmotionDetector:
 
                 face_tensor = self.transform(face_tensor)  # Resize
                 face_tensor = face_tensor.mean(dim=0, keepdim=True)  # grayscale
-                face_tensor = face_tensor.unsqueeze(0).to(self.device)  # batch + channel
+                face_tensor = face_tensor.unsqueeze(0).to(self.device)
 
                 with torch.no_grad():
                     output = self.model(face_tensor)
@@ -95,25 +92,39 @@ class EmotionDetector:
                 return None, "No emotions detected."
 
             df = pd.DataFrame(emotions_over_time)
-
-            plt.figure(figsize=(12, 8))
-            for emotion in self.emotions:
-                if emotion in df.columns:
-                    plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
-
-            plt.xlabel('Time (seconds)')
-            plt.ylabel('Confidence (%)')
-            plt.title('Emotions Over Time')
-            plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
-            plt.grid(True)
-            plt.tight_layout()
-
-            img_buf = io.BytesIO()
-            plt.savefig(img_buf, format='png', dpi=150, bbox_inches='tight')
-            img_buf.seek(0)
+            df['dominant_emotion'] = df[self.emotions].idxmax(axis=1)
+
+            # --- Chart Plotting ---
+            fig, axs = plt.subplots(2, 1, figsize=(12, 10), constrained_layout=True)
+
+            # 1. Stacked Area Chart
+            df_sorted = df.sort_values("timestamp")
+            axs[0].stackplot(df_sorted["timestamp"], [df_sorted[e] for e in self.emotions], labels=[e.title() for e in self.emotions])
+            axs[0].set_title("Emotions Over Time")
+            axs[0].set_xlabel("Time (seconds)")
+            axs[0].set_ylabel("Confidence (%)")
+            axs[0].legend(loc="upper right")
+            axs[0].grid(True)
+
+            # 2. Dominant Emotion Timeline (Bar Chart)
+            color_palette = sns.color_palette("husl", len(self.emotions))
+            emotion_color_map = {e: color_palette[i] for i, e in enumerate(self.emotions)}
+
+            colors = df['dominant_emotion'].map(emotion_color_map)
+            axs[1].bar(df['timestamp'], 1, color=colors, width=sample_rate / fps)
+            axs[1].set_title("Dominant Emotion Timeline")
+            axs[1].set_xlabel("Time (seconds)")
+            axs[1].set_yticks([])
+            axs[1].legend(handles=[plt.Rectangle((0, 0), 1, 1, color=emotion_color_map[e]) for e in self.emotions],
+                          labels=[e.title() for e in self.emotions], loc="upper right", title="Emotion")
+
+            # Save chart
+            buf = io.BytesIO()
+            plt.savefig(buf, format="png")
+            buf.seek(0)
             plt.close()
 
-            chart_image = Image.open(img_buf)
+            chart_image = Image.open(buf)
             avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
 
             result_text = f"**Video Analysis Complete**\n"
@@ -128,6 +139,7 @@ class EmotionDetector:
         except Exception as e:
             return None, f"Error: {str(e)}"
 
+# Gradio interface
 def create_interface():
     detector = EmotionDetector()
 
@@ -140,7 +152,7 @@ def create_interface():
     return gr.Interface(
         fn=process,
         inputs=[
-            gr.Video(label="Upload Video"),  # Removed type="file"
+            gr.Video(label="Upload Video"),
             gr.Slider(minimum=1, maximum=60, step=1, value=30, label="Sample Rate (Frames)")
         ],
         outputs=[
@@ -151,6 +163,5 @@ def create_interface():
         description="Upload a video to analyze emotions over time."
     )
 
-
 if __name__ == "__main__":
     create_interface().launch()
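
For reference, a minimal standalone sketch of the two-panel chart this commit introduces, run on synthetic data. The emotion labels, `fps`, `sample_rate`, and the random confidences below are illustrative placeholders, not values taken from app.py:

```python
# A minimal sketch of the new chart layout on synthetic data.
# The emotion labels, fps, and sample_rate are placeholders, not app.py values.
import io

import matplotlib
matplotlib.use("Agg")  # headless backend, matching the save-to-buffer usage
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

emotions = ["happy", "sad", "neutral"]  # placeholder labels
fps, sample_rate = 30, 30               # placeholder sampling settings

# Fake per-sample confidences (%), one row per sampled frame.
rng = np.random.default_rng(0)
raw = rng.random((20, len(emotions)))
df = pd.DataFrame(100 * raw / raw.sum(axis=1, keepdims=True), columns=emotions)
df["timestamp"] = np.arange(len(df)) * sample_rate / fps
df["dominant_emotion"] = df[emotions].idxmax(axis=1)

fig, axs = plt.subplots(2, 1, figsize=(12, 10), constrained_layout=True)

# Panel 1: stacked area chart of confidence over time.
df_sorted = df.sort_values("timestamp")
axs[0].stackplot(df_sorted["timestamp"], [df_sorted[e] for e in emotions],
                 labels=[e.title() for e in emotions])
axs[0].set_title("Emotions Over Time")
axs[0].set_xlabel("Time (seconds)")
axs[0].set_ylabel("Confidence (%)")
axs[0].legend(loc="upper right")
axs[0].grid(True)

# Panel 2: one unit-height bar per sample, colored by its dominant emotion.
palette = sns.color_palette("husl", len(emotions))
color_map = {e: palette[i] for i, e in enumerate(emotions)}
axs[1].bar(df["timestamp"], 1, color=df["dominant_emotion"].map(color_map).tolist(),
           width=sample_rate / fps)
axs[1].set_title("Dominant Emotion Timeline")
axs[1].set_xlabel("Time (seconds)")
axs[1].set_yticks([])
axs[1].legend(handles=[plt.Rectangle((0, 0), 1, 1, color=color_map[e]) for e in emotions],
              labels=[e.title() for e in emotions], loc="upper right", title="Emotion")

# Render to an in-memory PNG, as the app does before Image.open(buf).
buf = io.BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
plt.close(fig)
print(f"chart PNG size: {len(buf.getvalue())} bytes")
```

Note the bar width of `sample_rate / fps`: assuming each timestamp is frame_index / fps and frames are sampled every `sample_rate` frames, each bar spans exactly the gap to the next sample, so the timeline reads as a continuous color strip.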