import gradio as gr
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from deepface import DeepFace
import os
import tempfile
from PIL import Image
import io
import base64
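
# Assumption: the Space's requirements.txt (not shown in this file) provides
# roughly gradio, deepface, opencv-python-headless, numpy, pandas, matplotlib
# and Pillow; DeepFace additionally needs a TensorFlow/Keras backend at runtime.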

class EmotionDetector:
    def __init__(self):
        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

    def detect_emotions_image(self, image):
        """Detect emotions in a single image"""
        try:
            if image is None:
                return None, "No image provided"

            # Convert PIL Image to numpy array if needed
            if isinstance(image, Image.Image):
                image = np.array(image)

            # Convert RGB to BGR for OpenCV
            if len(image.shape) == 3 and image.shape[2] == 3:
                image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            else:
                image_bgr = image

            # Save temporary image for DeepFace
            with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
                cv2.imwrite(tmp_file.name, image_bgr)
                temp_path = tmp_file.name

            try:
                # Analyze emotions using DeepFace
                result = DeepFace.analyze(
                    img_path=temp_path,
                    actions=['emotion'],
                    enforce_detection=False,
                    detector_backend='opencv'
                )

                # Handle both single face and multiple faces results
                if isinstance(result, list):
                    emotions_data = result[0]['emotion']
                else:
                    emotions_data = result['emotion']

                # Create emotion chart
                emotion_df = pd.DataFrame(list(emotions_data.items()),
                                          columns=['Emotion', 'Confidence'])
                emotion_df = emotion_df.sort_values('Confidence', ascending=True)

                # Create matplotlib plot
                plt.figure(figsize=(10, 6))
                bars = plt.barh(emotion_df['Emotion'], emotion_df['Confidence'])
                plt.xlabel('Confidence (%)')
                plt.title('Emotion Detection Results')
                plt.grid(axis='x', alpha=0.3)

                # Color bars based on emotion
                colors = {
                    'happy': '#FFD700',
                    'sad': '#4169E1',
                    'angry': '#DC143C',
                    'fear': '#800080',
                    'surprise': '#FF8C00',
                    'disgust': '#228B22',
                    'neutral': '#708090'
                }
                for bar, emotion in zip(bars, emotion_df['Emotion']):
                    bar.set_color(colors.get(emotion, '#708090'))

                plt.tight_layout()

                # Save plot to bytes
                img_buffer = io.BytesIO()
                plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
                img_buffer.seek(0)
                plt.close()

                # Convert to PIL Image
                chart_image = Image.open(img_buffer)

                # Get dominant emotion
                dominant_emotion = max(emotions_data, key=emotions_data.get)
                confidence = emotions_data[dominant_emotion]

                result_text = f"**Dominant Emotion:** {dominant_emotion.title()}\n"
                result_text += f"**Confidence:** {confidence:.1f}%\n\n"
                result_text += "**All Emotions:**\n"
                for emotion, conf in sorted(emotions_data.items(), key=lambda x: x[1], reverse=True):
                    result_text += f"• {emotion.title()}: {conf:.1f}%\n"

                return chart_image, result_text
            finally:
                # Clean up temporary file
                if os.path.exists(temp_path):
                    os.unlink(temp_path)
        except Exception as e:
            error_msg = f"Error analyzing image: {str(e)}"
            print(error_msg)  # For debugging
            return None, error_msg
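
    # A minimal, hypothetical standalone use of detect_emotions_image outside
    # the Gradio UI (assumes a local file "face.jpg" exists):
    #
    #   detector = EmotionDetector()
    #   chart, summary = detector.detect_emotions_image(Image.open("face.jpg"))
    #   print(summary)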
    def detect_emotions_video(self, video_path, sample_rate=30):
        """Detect emotions in video by sampling frames"""
        try:
            if video_path is None:
                return None, "No video provided"

            cap = cv2.VideoCapture(video_path)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = int(cap.get(cv2.CAP_PROP_FPS))

            # Guard against unreadable files and missing FPS metadata
            # (avoids a division by zero when computing timestamps)
            if frame_count == 0 or fps <= 0:
                cap.release()
                return None, "Invalid video file"

            # Sample frames every 'sample_rate' frames
            frame_indices = range(0, frame_count, sample_rate)
            emotions_over_time = []

            for frame_idx in frame_indices:
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
                ret, frame = cap.read()
                if not ret:
                    continue

                try:
                    # Save frame temporarily
                    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
                        cv2.imwrite(tmp_file.name, frame)
                        temp_path = tmp_file.name

                    # Analyze frame
                    result = DeepFace.analyze(
                        img_path=temp_path,
                        actions=['emotion'],
                        enforce_detection=False,
                        detector_backend='opencv'
                    )

                    if isinstance(result, list):
                        emotions_data = result[0]['emotion']
                    else:
                        emotions_data = result['emotion']

                    # Add timestamp
                    timestamp = frame_idx / fps
                    emotions_data['timestamp'] = timestamp
                    emotions_over_time.append(emotions_data)

                    # Clean up
                    os.unlink(temp_path)
                except Exception as e:
                    print(f"Error processing frame {frame_idx}: {e}")
                    continue

            cap.release()

            if not emotions_over_time:
                return None, "No emotions detected in video"

            # Create DataFrame for plotting
            df = pd.DataFrame(emotions_over_time)

            # Plot emotions over time
            plt.figure(figsize=(12, 8))
            for emotion in self.emotions:
                if emotion in df.columns:
                    plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)

            plt.xlabel('Time (seconds)')
            plt.ylabel('Confidence (%)')
            plt.title('Emotions Over Time')
            plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
            plt.grid(True, alpha=0.3)
            plt.tight_layout()

            # Save plot
            img_buffer = io.BytesIO()
            plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
            img_buffer.seek(0)
            plt.close()

            chart_image = Image.open(img_buffer)

            # Calculate average emotions
            avg_emotions = df[self.emotions].mean().sort_values(ascending=False)

            result_text = "**Video Analysis Complete**\n"
            result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
            result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
            result_text += "**Average Emotions:**\n"
            for emotion, confidence in avg_emotions.items():
                result_text += f"• {emotion.title()}: {confidence:.1f}%\n"

            return chart_image, result_text
        except Exception as e:
            return None, f"Error processing video: {str(e)}"

# Initialize detector
detector = EmotionDetector()


# Create Gradio interface
def create_interface():
    with gr.Blocks(title="Emotion Detection App", theme=gr.themes.Soft()) as demo:
        gr.Markdown(
            """
            # Emotion Detection App

            Upload an image or video to detect emotions using AI. This app uses DeepFace for emotion recognition.

            **Supported emotions:** Happy, Sad, Angry, Fear, Surprise, Disgust, Neutral
            """
        )
        with gr.Tabs():
            # Image Analysis Tab
            with gr.Tab("Image Analysis"):
                with gr.Row():
                    with gr.Column():
                        image_input = gr.Image(
                            label="Upload Image",
                            type="pil"
                        )
                        image_button = gr.Button("Analyze Emotions", variant="primary")

                    with gr.Column():
                        image_chart = gr.Image(label="Emotion Chart")
                        image_results = gr.Markdown(label="Results")

                image_button.click(
                    fn=detector.detect_emotions_image,
                    inputs=[image_input],
                    outputs=[image_chart, image_results]
                )

            # Video Analysis Tab
            with gr.Tab("Video Analysis"):
                with gr.Row():
                    with gr.Column():
                        video_input = gr.Video(label="Upload Video")
                        with gr.Row():
                            sample_rate = gr.Slider(
                                minimum=10,
                                maximum=60,
                                value=30,
                                step=5,
                                label="Frame Sampling Rate"
                            )
                        video_button = gr.Button("Analyze Video", variant="primary")

                    with gr.Column():
                        video_chart = gr.Image(label="Emotions Over Time")
                        video_results = gr.Markdown(label="Results")

                video_button.click(
                    fn=detector.detect_emotions_video,
                    inputs=[video_input, sample_rate],
                    outputs=[video_chart, video_results]
                )
        # Instructions
        gr.Markdown("### Instructions")
        gr.Markdown(
            """
            **For Images:**
            - Upload any image with a visible face
            - The app will detect and analyze emotions
            - Results show confidence percentages for each emotion

            **For Videos:**
            - Upload video files (MP4, AVI, MOV, etc.)
            - Adjust the frame sampling rate to balance speed and detail
            - Lower values = more frames analyzed = more detail but slower
            - Higher values = fewer frames analyzed = faster but less detailed

            **Tips:**
            - Ensure faces are clearly visible and well-lit
            - The app works best with front-facing faces
            - If several faces are present, only the first detected face is analyzed
            """
        )

    return demo


# Launch the app
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
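
# Local usage (assumption: this file is the Space's app.py and the packages
# noted near the imports are installed):
#   python app.py
# then open the local URL that Gradio prints in a browser.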