import gradio as gr
import cv2
import pandas as pd
import os
import traceback
import matplotlib.pyplot as plt
# Global variables caching the lazily loaded detector
detector = None
detector_loaded = False


def load_video_detector():
    """Load the FER detector once and cache it for later calls."""
    global detector, detector_loaded
    if detector_loaded:
        return detector
    try:
        from fer import FER
        detector = FER(mtcnn=True)
        detector_loaded = True
        return detector
    except ImportError as e:
        raise RuntimeError(f"Failed to import FER: {e}")
    except Exception as e:
        raise RuntimeError(f"Failed to initialize FER detector: {e}")
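
# Note: FER(mtcnn=True) replaces the library's default OpenCV Haar-cascade face
# detector with MTCNN, which is slower but more robust to off-angle faces.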


def analyze_video_emotions(video_path, progress=gr.Progress()):
    """Analyze emotions in a video with robust error handling."""
    global detector
    if detector is None:
        try:
            detector = load_video_detector()
        except Exception as e:
            return f"Error loading detector: {str(e)}", None, None, None
    try:
        cap = cv2.VideoCapture(str(video_path))
        # Check that the video opened successfully
        if not cap.isOpened():
            return "Could not open video file", None, None, None

        emotions = []
        frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
        # Fall back to a sane default when frame-rate detection fails
        if frame_rate <= 0:
            frame_rate = 30
        frame_interval = max(1, frame_rate * 2)  # sample one frame every ~2 seconds
        frame_count = 0
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        progress(0, desc="Starting analysis...")
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            if frame_count % frame_interval == 0:
                progress_val = frame_count / total_frames if total_frames > 0 else 0
                progress(progress_val, desc=f"Analyzing frame {frame_count}/{total_frames}")
                try:
                    results = detector.detect_emotions(frame)
                    if results:
                        top_emotion = max(results[0]["emotions"], key=results[0]["emotions"].get)
                        emotions.append(top_emotion)
                except Exception as e:
                    # Log and skip frames that fail; frame_count must still advance
                    # below, so a `continue` here would throw off the sampling cadence
                    print(f"Warning: Error analyzing frame {frame_count}: {e}")
            frame_count += 1
        cap.release()

        if not emotions:
            return ("No faces or emotions detected in the video. "
                    "Try uploading a video with clear facial expressions."), None, None, None

        # Aggregate per-frame detections into counts
        emotion_counts = pd.Series(emotions).value_counts().to_dict()

        # Build the results text
        total_detections = sum(emotion_counts.values())
        results_text = "**Analysis completed!**\n\n**Detected Emotions:**\n\n"
        for emo, count in emotion_counts.items():
            percentage = (count / total_detections) * 100
            results_text += f"- **{emo.title()}**: {count} detections ({percentage:.1f}%)\n"

        # Dominant emotion
        dominant_emotion = max(emotion_counts, key=emotion_counts.get)
        results_text += f"\n**Dominant emotion detected**: {dominant_emotion.title()}"
        # Bar-chart visualization of the emotion distribution
        fig, ax = plt.subplots(figsize=(10, 6))
        emotions_list = list(emotion_counts.keys())
        counts_list = list(emotion_counts.values())
        colors = ['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4', '#FFEAA7', '#DDA0DD', '#98D8C8']
        bars = ax.bar(emotions_list, counts_list, color=colors)
        ax.set_xlabel('Emotions')
        ax.set_ylabel('Number of Detections')
        ax.set_title('Emotion Distribution in Video')
        # Add value labels on bars
        for bar in bars:
            height = bar.get_height()
            ax.text(bar.get_x() + bar.get_width() / 2., height,
                    f'{int(height)}', ha='center', va='bottom')
        plt.xticks(rotation=45)
        plt.tight_layout()

        return results_text, fig, emotion_counts, dominant_emotion
    except Exception as e:
        error_msg = f"Error during video analysis: {e}\nTraceback: {traceback.format_exc()}"
        return error_msg, None, None, None
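
# For reference: a minimal sketch of what FER's detect_emotions() returns for a
# frame (the scores below are illustrative, not real output):
#
#   [{"box": [x, y, w, h],
#     "emotions": {"angry": 0.01, "disgust": 0.00, "fear": 0.02, "happy": 0.90,
#                  "sad": 0.02, "surprise": 0.03, "neutral": 0.02}}]
#
# One dict per detected face; analyze_video_emotions() only inspects the first.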


def process_video(video_file):
    """Main processing function for the Gradio interface."""
    if video_file is None:
        return "Please upload a video file to analyze facial emotions.", None

    # Gather basic file info for the results panel
    file_size = os.path.getsize(video_file) / (1024 * 1024)  # MB
    file_info = (
        f"File uploaded successfully: {os.path.basename(video_file)}\n"
        f"File size: {file_size:.2f} MB\n\n"
        "Analyzing facial emotions in video... This may take a few minutes.\n\n"
    )
    try:
        results_text, plot, emotion_counts, dominant_emotion = analyze_video_emotions(video_file)
        return file_info + results_text, plot
    except Exception as e:
        error_msg = f"Analysis failed: {e}\nPlease try a different video file or check the file format."
        return file_info + error_msg, None
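
# Quick standalone check (assumes a local clip named sample.mp4; hypothetical path):
#   text, fig = process_video("sample.mp4")
#   print(text)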


def create_interface():
    """Create the Gradio interface."""
    # Custom CSS for better styling
    css = """
    .gradio-container {
        font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
    }
    .main-header {
        text-align: center;
        color: #2c3e50;
        margin-bottom: 2rem;
    }
    """
    with gr.Blocks(css=css, title="Video Emotion Detection", theme=gr.themes.Soft()) as iface:
        # Header
        gr.HTML("""
            <div class="main-header">
                <h1>😊 Video Emotion Detection</h1>
                <p>Upload a video file to analyze facial emotions using advanced AI</p>
            </div>
        """)
        with gr.Row():
            with gr.Column(scale=1):
                # File upload
                video_input = gr.File(
                    label="Choose a video file",
                    file_types=[".mp4", ".avi", ".mov"],
                    type="filepath"
                )
                # Process button
                process_btn = gr.Button("Analyze Video", variant="primary", size="lg")
                # Info section
                with gr.Accordion("ℹ️ About this app", open=False):
                    gr.Markdown("""
                    ### How it works:
                    - **Facial Detection**: Uses MTCNN for face detection
                    - **Emotion Recognition**: Classifies facial expressions with FER (Facial Emotion Recognition)
                    - **Sampling**: Analyzes one frame every 2 seconds for efficiency
                    - **Supported Formats**: MP4, AVI, MOV

                    ### Tips for best results:
                    - Use videos with clear, well-lit faces
                    - Ensure faces are not too small in the frame
                    - If multiple people appear, only the first detected face per frame is analyzed
                    - Shorter videos (under 5 minutes) process faster
                    """)
            with gr.Column(scale=2):
                # Results section
                results_output = gr.Textbox(
                    label="Analysis Results",
                    lines=15,
                    max_lines=20,
                    interactive=False,
                    placeholder="Upload a video and click 'Analyze Video' to see results here..."
                )
                # Plot output
                plot_output = gr.Plot(label="Emotion Distribution Chart")
        # Event handlers
        process_btn.click(
            fn=process_video,
            inputs=[video_input],
            outputs=[results_output, plot_output],
            show_progress=True
        )
        # Confirm the upload immediately; analysis still requires the button click
        video_input.change(
            fn=lambda x: ("Video uploaded successfully! Click 'Analyze Video' to start processing." if x else "", None),
            inputs=[video_input],
            outputs=[results_output, plot_output]
        )
        # Footer
        gr.HTML("""
            <div style="text-align: center; margin-top: 2rem; padding: 1rem; background-color: #f8f9fa; border-radius: 0.5rem;">
                <p><strong>Video Emotion Detection App</strong> - Powered by FER and MTCNN</p>
                <p>Upload your video files and get detailed emotion analysis with visualizations</p>
            </div>
        """)
    return iface


def main():
    """Main function to launch the app."""
    # Try to initialize the detector on startup so the first request is fast
    try:
        print("Loading emotion detection model...")
        load_video_detector()
        print("Model loaded successfully!")
    except Exception as e:
        print(f"Warning: Could not pre-load detector: {e}")
        print("The detector will be loaded when the first video is processed.")

    # Create and launch the interface
    iface = create_interface()
    iface.launch(
        server_name="0.0.0.0",  # allow external access
        server_port=7860,       # default Gradio port
        share=False,            # set True to create a public link
        debug=False,
        show_error=True,
        inbrowser=True          # auto-open in the browser
    )


if __name__ == "__main__":
    main()
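
# The accompanying requirements.txt is not shown here; a plausible minimal set,
# inferred from the imports above (package names assumed, versions unpinned):
#
#   gradio
#   fer
#   opencv-python-headless
#   pandas
#   matplotlib
#   tensorflow    # fer's backend
#   mtcnn         # needed for FER(mtcnn=True)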