yunusajib committed
Commit b538542 · verified
1 Parent(s): a5e17e5

update app v3

Files changed (1):
  app.py +77 -290
app.py CHANGED
@@ -1,303 +1,90 @@
-import gradio as gr
-import cv2
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-from deepface import DeepFace
-import os
-import tempfile
-from PIL import Image
-import io
-import base64
-
-class EmotionDetector:
-    def __init__(self):
-        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
 
-    def detect_emotions_image(self, image):
-        """Detect emotions in a single image"""
-        try:
-            if image is None:
-                return None, "No image provided"
-
-            # Convert PIL Image to numpy array if needed
-            if isinstance(image, Image.Image):
-                image = np.array(image)
-
-            # Convert RGB to BGR for OpenCV
-            if len(image.shape) == 3 and image.shape[2] == 3:
-                image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-            else:
-                image_bgr = image
-
-            # Save temporary image for DeepFace
-            with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
-                cv2.imwrite(tmp_file.name, image_bgr)
-                temp_path = tmp_file.name
-
             try:
-                # Analyze emotions using DeepFace
                 result = DeepFace.analyze(
                     img_path=temp_path,
                     actions=['emotion'],
                     enforce_detection=False,
                     detector_backend='opencv'
                 )
-
-                # Handle both single face and multiple faces results
-                if isinstance(result, list):
-                    emotions_data = result[0]['emotion']
-                else:
-                    emotions_data = result['emotion']
-
-                # Create emotion chart
-                emotion_df = pd.DataFrame(list(emotions_data.items()),
-                                          columns=['Emotion', 'Confidence'])
-                emotion_df = emotion_df.sort_values('Confidence', ascending=True)
-
-                # Create matplotlib plot
-                plt.figure(figsize=(10, 6))
-                bars = plt.barh(emotion_df['Emotion'], emotion_df['Confidence'])
-                plt.xlabel('Confidence (%)')
-                plt.title('Emotion Detection Results')
-                plt.grid(axis='x', alpha=0.3)
-
-                # Color bars based on emotion
-                colors = {
-                    'happy': '#FFD700',
-                    'sad': '#4169E1',
-                    'angry': '#DC143C',
-                    'fear': '#800080',
-                    'surprise': '#FF8C00',
-                    'disgust': '#228B22',
-                    'neutral': '#708090'
-                }
-
-                for bar, emotion in zip(bars, emotion_df['Emotion']):
-                    bar.set_color(colors.get(emotion, '#708090'))
-
-                plt.tight_layout()
-
-                # Save plot to bytes
-                img_buffer = io.BytesIO()
-                plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
-                img_buffer.seek(0)
-                plt.close()
-
-                # Convert to PIL Image
-                chart_image = Image.open(img_buffer)
-
-                # Get dominant emotion
-                dominant_emotion = max(emotions_data, key=emotions_data.get)
-                confidence = emotions_data[dominant_emotion]
-
-                result_text = f"**Dominant Emotion:** {dominant_emotion.title()}\n"
-                result_text += f"**Confidence:** {confidence:.1f}%\n\n"
-                result_text += "**All Emotions:**\n"
-
-                for emotion, conf in sorted(emotions_data.items(), key=lambda x: x[1], reverse=True):
-                    result_text += f"• {emotion.title()}: {conf:.1f}%\n"
-
-                return chart_image, result_text
-
-            finally:
-                # Clean up temporary file
-                if os.path.exists(temp_path):
-                    os.unlink(temp_path)
-
-        except Exception as e:
-            error_msg = f"Error analyzing image: {str(e)}"
-            print(error_msg)  # For debugging
-            return None, error_msg
-
-    def detect_emotions_video(self, video_path, sample_rate=30):
-        """Detect emotions in video by sampling frames"""
-        try:
-            if video_path is None:
-                return None, "No video provided"
-
-            cap = cv2.VideoCapture(video_path)
-            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-            fps = int(cap.get(cv2.CAP_PROP_FPS))
-
-            if frame_count == 0:
-                return None, "Invalid video file"
-
-            # Sample frames every 'sample_rate' frames
-            frame_indices = range(0, frame_count, sample_rate)
-            emotions_over_time = []
-
-            for frame_idx in frame_indices:
-                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-                ret, frame = cap.read()
-
-                if not ret:
-                    continue
-
-                try:
-                    # Save frame temporarily
-                    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
-                        cv2.imwrite(tmp_file.name, frame)
-                        temp_path = tmp_file.name
-
-                    # Analyze frame
-                    result = DeepFace.analyze(
-                        img_path=temp_path,
-                        actions=['emotion'],
-                        enforce_detection=False,
-                        detector_backend='opencv'
-                    )
-
-                    if isinstance(result, list):
-                        emotions_data = result[0]['emotion']
-                    else:
-                        emotions_data = result['emotion']
-
-                    # Add timestamp
-                    timestamp = frame_idx / fps
-                    emotions_data['timestamp'] = timestamp
-                    emotions_over_time.append(emotions_data)
-
-                    # Clean up
-                    os.unlink(temp_path)
-
-                except Exception as e:
-                    print(f"Error processing frame {frame_idx}: {e}")
-                    continue
-
-            cap.release()
-
-            if not emotions_over_time:
-                return None, "No emotions detected in video"
-
-            # Create DataFrame for plotting
-            df = pd.DataFrame(emotions_over_time)
-
-            # Plot emotions over time
-            plt.figure(figsize=(12, 8))
-
-            for emotion in self.emotions:
-                if emotion in df.columns:
-                    plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
-
-            plt.xlabel('Time (seconds)')
-            plt.ylabel('Confidence (%)')
-            plt.title('Emotions Over Time')
-            plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
-            plt.grid(True, alpha=0.3)
-            plt.tight_layout()
-
-            # Save plot
-            img_buffer = io.BytesIO()
-            plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
-            img_buffer.seek(0)
-            plt.close()
-
-            chart_image = Image.open(img_buffer)
-
-            # Calculate average emotions
-            avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
-
-            result_text = f"**Video Analysis Complete**\n"
-            result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
-            result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
-            result_text += "**Average Emotions:**\n"
-
-            for emotion, confidence in avg_emotions.items():
-                result_text += f"• {emotion.title()}: {confidence:.1f}%\n"
-
-            return chart_image, result_text
-
-        except Exception as e:
-            return None, f"Error processing video: {str(e)}"
 
-# Initialize detector
-detector = EmotionDetector()
 
-# Create Gradio interface
-def create_interface():
-    with gr.Blocks(title="Emotion Detection App", theme=gr.themes.Soft()) as demo:
-        gr.Markdown(
-            """
-            # 🎭 Emotion Detection App
-
-            Upload an image or video to detect emotions using AI. This app uses DeepFace for accurate emotion recognition.
-
-            **Supported emotions:** Happy, Sad, Angry, Fear, Surprise, Disgust, Neutral
-            """
-        )
-
-        with gr.Tabs():
-            # Image Analysis Tab
-            with gr.Tab("📸 Image Analysis"):
-                with gr.Row():
-                    with gr.Column():
-                        image_input = gr.Image(
-                            label="Upload Image",
-                            type="pil"
-                        )
-                        image_button = gr.Button("Analyze Emotions", variant="primary")
-
-                    with gr.Column():
-                        image_chart = gr.Image(label="Emotion Chart")
-                        image_results = gr.Markdown(label="Results")
-
-                image_button.click(
-                    fn=detector.detect_emotions_image,
-                    inputs=[image_input],
-                    outputs=[image_chart, image_results]
-                )
-
-            # Video Analysis Tab
-            with gr.Tab("🎥 Video Analysis"):
-                with gr.Row():
-                    with gr.Column():
-                        video_input = gr.Video(label="Upload Video")
-                        with gr.Row():
-                            sample_rate = gr.Slider(
-                                minimum=10,
-                                maximum=60,
-                                value=30,
-                                step=5,
-                                label="Frame Sampling Rate"
-                            )
-                        video_button = gr.Button("Analyze Video", variant="primary")
-
-                    with gr.Column():
-                        video_chart = gr.Image(label="Emotions Over Time")
-                        video_results = gr.Markdown(label="Results")
-
-                video_button.click(
-                    fn=detector.detect_emotions_video,
-                    inputs=[video_input, sample_rate],
-                    outputs=[video_chart, video_results]
-                )
-
-        # Examples
-        gr.Markdown("### 📋 Instructions")
-        gr.Markdown(
-            """
-            **For Images:**
-            - Upload any image with visible faces
-            - The app will detect and analyze emotions
-            - Results show confidence percentages for each emotion
-
-            **For Videos:**
-            - Upload video files (MP4, AVI, MOV, etc.)
-            - Adjust frame sampling rate to balance speed vs accuracy
-            - Lower values = more frames analyzed = more accurate but slower
-            - Higher values = fewer frames analyzed = faster but less detailed
-
-            **Tips:**
-            - Ensure faces are clearly visible and well-lit
-            - The app works best with front-facing faces
-            - Multiple faces in one image/video are supported
-            """
-        )
-
-    return demo
 
-# Launch the app
 if __name__ == "__main__":
     demo = create_interface()
-    demo.launch()
 
+def detect_emotions_video(self, video_path, sample_rate=30, max_size_mb=50):
+    """Detect emotions in video by sampling frames"""
+    try:
+        if video_path is None:
+            return None, "No video provided"
 
+        # Check file size
+        file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
+        if file_size_mb > max_size_mb:
+            return None, f"Video file too large ({file_size_mb:.2f} MB). Max allowed: {max_size_mb} MB."
+
+        cap = cv2.VideoCapture(video_path)
+        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+        if frame_count == 0:
+            return None, "Invalid or unreadable video file"
+
+        # Sample every Nth frame
+        frame_indices = range(0, frame_count, sample_rate)
+        emotions_over_time = []
+
+        for frame_idx in frame_indices:
+            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+            ret, frame = cap.read()
+            if not ret:
+                continue
+
             try:
+                with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
+                    cv2.imwrite(tmp_file.name, frame)
+                    temp_path = tmp_file.name
+
                 result = DeepFace.analyze(
                     img_path=temp_path,
                     actions=['emotion'],
                     enforce_detection=False,
                     detector_backend='opencv'
                 )
 
+                emotions_data = result[0]['emotion'] if isinstance(result, list) else result['emotion']
+                emotions_data['timestamp'] = frame_idx / fps
+                emotions_over_time.append(emotions_data)
 
+                os.unlink(temp_path)
+            except Exception as e:
+                print(f"Error processing frame {frame_idx}: {e}")
+                continue
+
+        cap.release()
+
+        if not emotions_over_time:
+            return None, "No emotions detected in video."
+
+        df = pd.DataFrame(emotions_over_time)
+
+        # Plot emotions over time
+        plt.figure(figsize=(12, 8))
+        for emotion in self.emotions:
+            if emotion in df.columns:
+                plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
+        plt.xlabel('Time (seconds)')
+        plt.ylabel('Confidence (%)')
+        plt.title('Emotions Over Time')
+        plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
+        plt.grid(True, alpha=0.3)
+        plt.tight_layout()
+
+        img_buffer = io.BytesIO()
+        plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
+        img_buffer.seek(0)
+        plt.close()
+
+        chart_image = Image.open(img_buffer)
+        avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
+
+        result_text = f"**Video Analysis Complete**\n"
+        result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
+        result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
+        result_text += "**Average Emotions:**\n"
+        for emotion, confidence in avg_emotions.items():
+            result_text += f"• {emotion.title()}: {confidence:.1f}%\n"
+
+        return chart_image, result_text
 
+    except Exception as e:
+        return None, f"Error processing video: {str(e)}"
 if __name__ == "__main__":
     demo = create_interface()
+    demo.launch()
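
The diff adds only the rewritten detect_emotions_video method plus the __main__ block; a minimal usage sketch (not part of the commit), assuming the EmotionDetector class, its imports, and create_interface() from the previous version remain defined in app.py, and with a hypothetical local file "sample.mp4":

    detector = EmotionDetector()

    # The new max_size_mb guard rejects oversized uploads before any frame is decoded.
    chart, summary = detector.detect_emotions_video(
        "sample.mp4",     # hypothetical input file
        sample_rate=30,   # analyze every 30th frame
        max_size_mb=50,   # default cap introduced in this commit
    )
    if chart is None:
        # summary carries the error message,
        # e.g. "Video file too large (61.20 MB). Max allowed: 50 MB."
        print(summary)
    else:
        print(summary)                        # markdown summary of average emotions
        chart.save("emotions_over_time.png")  # PIL image of the emotions-over-time plot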