yunusajib committed
Commit 421b67d · verified · 1 Parent(s): b538542

update app

Files changed (1)
  1. app.py +125 -87

app.py CHANGED
@@ -1,90 +1,128 @@
-def detect_emotions_video(self, video_path, sample_rate=30, max_size_mb=50):
-    """Detect emotions in video by sampling frames"""
-    try:
-        if video_path is None:
-            return None, "No video provided"
-
-        # Check file size
-        file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
-        if file_size_mb > max_size_mb:
-            return None, f"Video file too large ({file_size_mb:.2f} MB). Max allowed: {max_size_mb} MB."
-
-        cap = cv2.VideoCapture(video_path)
-        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        fps = int(cap.get(cv2.CAP_PROP_FPS))
-
-        if frame_count == 0:
-            return None, "Invalid or unreadable video file"
-
-        # Sample every Nth frame
-        frame_indices = range(0, frame_count, sample_rate)
-        emotions_over_time = []
-
-        for frame_idx in frame_indices:
-            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-            ret, frame = cap.read()
-            if not ret:
-                continue
-
-            try:
-                with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
-                    cv2.imwrite(tmp_file.name, frame)
-                    temp_path = tmp_file.name
-
-                result = DeepFace.analyze(
-                    img_path=temp_path,
-                    actions=['emotion'],
-                    enforce_detection=False,
-                    detector_backend='opencv'
-                )
-
-                emotions_data = result[0]['emotion'] if isinstance(result, list) else result['emotion']
-                emotions_data['timestamp'] = frame_idx / fps
-                emotions_over_time.append(emotions_data)
-
-                os.unlink(temp_path)
-            except Exception as e:
-                print(f"Error processing frame {frame_idx}: {e}")
-                continue
-
-        cap.release()
-
-        if not emotions_over_time:
-            return None, "No emotions detected in video."
-
-        df = pd.DataFrame(emotions_over_time)
-
-        # Plot emotions over time
-        plt.figure(figsize=(12, 8))
-        for emotion in self.emotions:
-            if emotion in df.columns:
-                plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
-        plt.xlabel('Time (seconds)')
-        plt.ylabel('Confidence (%)')
-        plt.title('Emotions Over Time')
-        plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
-        plt.grid(True, alpha=0.3)
-        plt.tight_layout()
-
-        img_buffer = io.BytesIO()
-        plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
-        img_buffer.seek(0)
-        plt.close()
-
-        chart_image = Image.open(img_buffer)
-        avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
-
-        result_text = f"**Video Analysis Complete**\n"
-        result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
-        result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
-        result_text += "**Average Emotions:**\n"
-        for emotion, confidence in avg_emotions.items():
-            result_text += f"• {emotion.title()}: {confidence:.1f}%\n"
-
-        return chart_image, result_text
-
-    except Exception as e:
-        return None, f"Error processing video: {str(e)}"
+import os
+import cv2
+import tempfile
+import io
+import pandas as pd
+import matplotlib.pyplot as plt
+from PIL import Image
+from deepface import DeepFace
+import gradio as gr
+
+class EmotionDetector:
+    def __init__(self):
+        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+
+    def detect_emotions_video(self, video_path, sample_rate=30, max_size_mb=50):
+        """Detect emotions in video by sampling frames"""
+        try:
+            if video_path is None:
+                return None, "No video provided"
+
+            # Check file size
+            file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
+            if file_size_mb > max_size_mb:
+                return None, f"Video file too large ({file_size_mb:.2f} MB). Max allowed: {max_size_mb} MB."
+
+            cap = cv2.VideoCapture(video_path)
+            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+            if frame_count == 0:
+                return None, "Invalid or unreadable video file"
+
+            frame_indices = range(0, frame_count, sample_rate)
+            emotions_over_time = []
+
+            for frame_idx in frame_indices:
+                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+                ret, frame = cap.read()
+                if not ret:
+                    continue
+
+                try:
+                    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
+                        cv2.imwrite(tmp_file.name, frame)
+                        temp_path = tmp_file.name
+
+                    result = DeepFace.analyze(
+                        img_path=temp_path,
+                        actions=['emotion'],
+                        enforce_detection=False,
+                        detector_backend='opencv'
+                    )
+
+                    emotions_data = result[0]['emotion'] if isinstance(result, list) else result['emotion']
+                    emotions_data['timestamp'] = frame_idx / fps
+                    emotions_over_time.append(emotions_data)
+
+                    os.unlink(temp_path)
+                except Exception as e:
+                    print(f"Error processing frame {frame_idx}: {e}")
+                    continue
+
+            cap.release()
+
+            if not emotions_over_time:
+                return None, "No emotions detected in video."
+
+            df = pd.DataFrame(emotions_over_time)
+
+            # Plot emotions over time
+            plt.figure(figsize=(12, 8))
+            for emotion in self.emotions:
+                if emotion in df.columns:
+                    plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
+            plt.xlabel('Time (seconds)')
+            plt.ylabel('Confidence (%)')
+            plt.title('Emotions Over Time')
+            plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
+            plt.grid(True, alpha=0.3)
+            plt.tight_layout()
+
+            img_buffer = io.BytesIO()
+            plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
+            img_buffer.seek(0)
+            plt.close()
+
+            chart_image = Image.open(img_buffer)
+            avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
+
+            result_text = f"**Video Analysis Complete**\n"
+            result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
+            result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
+            result_text += "**Average Emotions:**\n"
+            for emotion, confidence in avg_emotions.items():
+                result_text += f"• {emotion.title()}: {confidence:.1f}%\n"
+
+            return chart_image, result_text
+
+        except Exception as e:
+            return None, f"Error processing video: {str(e)}"
+
+def create_interface():
+    detector = EmotionDetector()
+
+    def process(video, sample_rate):
+        if video is None:
+            return None, "Please upload a video."
+        return detector.detect_emotions_video(video, sample_rate)
+
+    iface = gr.Interface(
+        fn=process,
+        inputs=[
+            gr.Video(label="Upload Video"),
+            gr.Slider(minimum=1, maximum=60, step=1, value=30, label="Sample Rate (Frames)")
+        ],
+        outputs=[
+            gr.Image(type="pil", label="Emotion Chart"),
+            gr.Textbox(label="Analysis Summary")
+        ],
+        title="Emotion Detection from Video",
+        description="Upload a video to analyze facial emotions over time."
+    )
+
+    return iface
+
 if __name__ == "__main__":
     demo = create_interface()
     demo.launch()
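
For anyone who wants to exercise the updated app.py outside the Gradio UI, here is a minimal smoke-test sketch. It is not part of this commit: it assumes app.py is importable from the working directory and that a short local clip named sample.mp4 (a hypothetical placeholder) exists.

    # Minimal smoke test for the updated app.py (not part of this commit).
    # Assumes app.py is importable from the working directory and that a short
    # local clip "sample.mp4" (hypothetical placeholder) exists.
    from app import EmotionDetector

    detector = EmotionDetector()
    # sample_rate=30 mirrors the Gradio slider's default: analyze every 30th frame.
    chart, summary = detector.detect_emotions_video("sample.mp4", sample_rate=30)

    if chart is None:
        # The method returns (None, error_message) on failure.
        print(summary)
    else:
        # On success it returns a PIL.Image chart and a text summary.
        chart.save("emotions_over_time.png")
        print(summary)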