yunusajib committed
Commit 24c8903 · verified · 1 Parent(s): cd1fa9e

Upload app.py and requirements.txt

Files changed (2)
  1. app.py +261 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,261 @@
+ import gradio as gr
+ import cv2
+ import pandas as pd
+ import os
+ import tempfile
+ import traceback
+ from pathlib import Path
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ # Global variables to store the detector so it is only loaded once
+ detector = None
+ detector_loaded = False
+
+ def load_video_detector():
+     """Load FER detector with error handling"""
+     global detector, detector_loaded
+
+     if detector_loaded:
+         return detector
+
+     try:
+         from fer import FER
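+         # mtcnn=True swaps FER's default OpenCV Haar-cascade face detector
+         # for MTCNN, which is slower but noticeably more accurate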
+         detector = FER(mtcnn=True)
+         detector_loaded = True
+         return detector
+     except ImportError as e:
+         raise Exception(f"Failed to import FER: {e}")
+     except Exception as e:
+         raise Exception(f"Failed to initialize FER detector: {e}")
+
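+ # Note: Gradio injects a live progress tracker only when gr.Progress() is a
+ # default argument of the function wired to an event; since process_video calls
+ # this function directly, these progress updates may not reach the UI.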
+ def analyze_video_emotions(video_path, progress=gr.Progress()):
+     """Analyze emotions in video with robust error handling"""
+     global detector
+
+     if detector is None:
+         try:
+             detector = load_video_detector()
+         except Exception as e:
+             return f"Error loading detector: {str(e)}", None, None, None
+
+     try:
+         cap = cv2.VideoCapture(str(video_path))
+
+         # Check if video opened successfully
+         if not cap.isOpened():
+             return "Could not open video file", None, None, None
+
+         emotions = []
+         frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
+
+         # Handle cases where frame rate detection fails
+         if frame_rate <= 0:
+             frame_rate = 30
+
+         frame_interval = max(1, frame_rate * 2)  # analyze every 2 seconds
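+         # e.g. at 30 fps: 30 * 2 = 60, so one frame in every 60 is analyzed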
+         frame_count = 0
+         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         progress(0, desc="Starting analysis...")
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             if frame_count % frame_interval == 0:
+                 progress_val = frame_count / total_frames if total_frames > 0 else 0
+                 progress(progress_val, desc=f"Analyzing frame {frame_count}/{total_frames}")
+
+                 try:
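+                     # detect_emotions returns one dict per detected face, e.g.
+                     # [{"box": [x, y, w, h], "emotions": {"happy": 0.92, ...}}]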
+                     results = detector.detect_emotions(frame)
+                     if results and len(results) > 0:
+                         top_emotion = max(results[0]["emotions"], key=results[0]["emotions"].get)
+                         emotions.append(top_emotion)
+                 except Exception as e:
+                     print(f"Warning: Error analyzing frame {frame_count}: {e}")
+                     # no continue here: fall through so frame_count is still incremented
+
+             frame_count += 1
+
+         cap.release()
+
+         if not emotions:
+             return "No faces or emotions detected in the video. Try uploading a video with clear facial expressions.", None, None, None
+
+         # Process results
+         emotion_counts = pd.Series(emotions).value_counts().to_dict()
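+         # e.g. ["happy", "happy", "neutral"] -> {"happy": 2, "neutral": 1}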
+
+         # Create results text
+         total_detections = sum(emotion_counts.values())
+         results_text = "**Analysis completed!**\n\n**Detected Emotions:**\n\n"
+
+         for emo, count in emotion_counts.items():
+             percentage = (count / total_detections) * 100
+             results_text += f"- **{emo.title()}**: {count} detections ({percentage:.1f}%)\n"
+
+         # Dominant emotion
+         dominant_emotion = max(emotion_counts, key=emotion_counts.get)
+         results_text += f"\n**Dominant emotion detected**: {dominant_emotion.title()}"
+
+         # Create visualization
+         fig, ax = plt.subplots(figsize=(10, 6))
+         emotions_list = list(emotion_counts.keys())
+         counts_list = list(emotion_counts.values())
+
+         bars = ax.bar(emotions_list, counts_list, color=['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4', '#FFEAA7', '#DDA0DD', '#98D8C8'])
+         ax.set_xlabel('Emotions')
+         ax.set_ylabel('Number of Detections')
+         ax.set_title('Emotion Distribution in Video')
+
+         # Add value labels on bars
+         for bar in bars:
+             height = bar.get_height()
+             ax.text(bar.get_x() + bar.get_width()/2., height,
+                     f'{int(height)}',
+                     ha='center', va='bottom')
+
+         plt.xticks(rotation=45)
+         plt.tight_layout()
+
+         return results_text, fig, emotion_counts, dominant_emotion
+
+     except Exception as e:
+         error_msg = f"Error during video analysis: {e}\nTraceback: {traceback.format_exc()}"
+         return error_msg, None, None, None
+
+ def process_video(video_file):
+     """Main processing function for Gradio interface"""
+     if video_file is None:
+         return "Please upload a video file to analyze facial emotions.", None
+
+     # Get file info
+     file_size = os.path.getsize(video_file) / (1024 * 1024)  # MB
+     file_info = f"File uploaded successfully: {os.path.basename(video_file)}\nFile size: {file_size:.2f} MB\n\nAnalyzing facial emotions in video... This may take a few minutes.\n\n"
+
+     try:
+         results_text, plot, emotion_counts, dominant_emotion = analyze_video_emotions(video_file)
+         return file_info + results_text, plot
+
+     except Exception as e:
+         error_msg = f"Analysis failed: {e}\nPlease try with a different video file or check the file format."
+         return file_info + error_msg, None
+
+ def create_interface():
+     """Create the Gradio interface"""
+
+     # Custom CSS for better styling
+     css = """
+     .gradio-container {
+         font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+     }
+     .main-header {
+         text-align: center;
+         color: #2c3e50;
+         margin-bottom: 2rem;
+     }
+     """
+
+     with gr.Blocks(css=css, title="Video Emotion Detection", theme=gr.themes.Soft()) as iface:
+
+         # Header
+         gr.HTML("""
+         <div class="main-header">
+             <h1>😊 Video Emotion Detection</h1>
+             <p>Upload a video file to analyze facial emotions using advanced AI</p>
+         </div>
+         """)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 # File upload
+                 video_input = gr.File(
+                     label="Choose a video file",
+                     file_types=[".mp4", ".avi", ".mov"],
+                     type="filepath"
+                 )
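+                 # type="filepath" hands process_video a path string on disk,
+                 # which is what os.path.getsize and cv2.VideoCapture expect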
+
+                 # Process button
+                 process_btn = gr.Button("Analyze Video", variant="primary", size="lg")
+
+                 # Info section
+                 with gr.Accordion("ℹ️ About this app", open=False):
+                     gr.Markdown("""
+                     ### How it works:
+                     - **Facial Detection**: Uses MTCNN for face detection
+                     - **Emotion Recognition**: Analyzes facial expressions using FER (Facial Emotion Recognition)
+                     - **Sampling**: Analyzes frames every 2 seconds for efficiency
+                     - **Supported Formats**: MP4, AVI, MOV
+
+                     ### Tips for best results:
+                     - Use videos with clear, well-lit faces
+                     - Ensure faces are not too small in the frame
+                     - In videos with multiple people, only the first detected face in each sampled frame is analyzed
+                     - Shorter videos (< 5 minutes) process faster
+                     """)
+
+             with gr.Column(scale=2):
+                 # Results section
+                 results_output = gr.Textbox(
+                     label="Analysis Results",
+                     lines=15,
+                     max_lines=20,
+                     interactive=False,
+                     placeholder="Upload a video and click 'Analyze Video' to see results here..."
+                 )
+
+                 # Plot output
+                 plot_output = gr.Plot(label="Emotion Distribution Chart")
+
+         # Event handlers
+         process_btn.click(
+             fn=process_video,
+             inputs=[video_input],
+             outputs=[results_output, plot_output],
+             show_progress="full"  # Gradio 4 expects "full", "minimal", or "hidden"
+         )
+
+         # Show an upload confirmation message (analysis still requires the button)
+         video_input.change(
+             fn=lambda x: ("Video uploaded successfully! Click 'Analyze Video' to start processing." if x else "", None),
+             inputs=[video_input],
+             outputs=[results_output, plot_output]
+         )
+
+         # Footer
+         gr.HTML("""
+         <div style="text-align: center; margin-top: 2rem; padding: 1rem; background-color: #f8f9fa; border-radius: 0.5rem;">
+             <p><strong>Video Emotion Detection App</strong> - Powered by FER and MTCNN</p>
+             <p>Upload your video files and get detailed emotion analysis with visualizations</p>
+         </div>
+         """)
+
+     return iface
+
+ def main():
+     """Main function to launch the app"""
+     # Initialize detector on startup
+     try:
+         print("Loading emotion detection model...")
+         load_video_detector()
+         print("Model loaded successfully!")
+     except Exception as e:
+         print(f"Warning: Could not pre-load detector: {e}")
+         print("Detector will be loaded when first video is processed.")
+
+     # Create and launch interface
+     iface = create_interface()
+
+     # Launch the app
+     iface.launch(
+         server_name="0.0.0.0",  # Allow external access
+         server_port=7860,       # Default Gradio port
+         share=False,            # Set to True to create public link
+         debug=False,
+         show_error=True,
+         inbrowser=True          # Auto-open in browser
+     )
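+     # 0.0.0.0:7860 matches the Hugging Face Spaces convention; inbrowser
+     # only has an effect when the app is run on a local machine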
+
+ if __name__ == "__main__":
+     main()
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ gradio>=4.0.0
+ opencv-python-headless==4.8.1.78
+ pandas>=1.5.0
+ matplotlib>=3.6.0
+ fer>=22.5.1
+ tensorflow>=2.10.0
+ numpy>=1.21.0
+ Pillow>=9.0.0
+ mtcnn>=0.1.1