import gradio as gr
import cv2
from deepface import DeepFace
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import tempfile

analyzer = SentimentIntensityAnalyzer()

def analyze_text(text):
    """Classify the sentiment of a post using VADER's compound score."""
    score = analyzer.polarity_scores(text)
    if score['compound'] >= 0.05:
        return "Positive 😊"
    elif score['compound'] <= -0.05:
        return "Negative 😞"
    else:
        return "Neutral 😐"

def process_all(text, video):
    """Combine text sentiment with the multi-frame facial emotion estimate."""
    text_sentiment = analyze_text(text)
    video_emotion = analyze_video_emotion(video)
    return f"Text Sentiment: {text_sentiment}\nFacial Emotion: {video_emotion}"

# A minimal interface wired to process_all; it is defined here but not launched,
# since only the fuller interface at the bottom of the file starts the app.
iface = gr.Interface(
    fn=process_all,
    inputs=[gr.Textbox(label="Social Media Post"), gr.Video(label="Upload Video")],
    outputs="text",
    title="Emotion & Sentiment Analyzer"
)

def analyze_video(video_file):
    """Detect the dominant emotion in the first readable frame of the upload."""
    if video_file is None:
        return "No video uploaded"
    # gr.File typically hands over a temp-file wrapper with a .name path
    # (or a plain filepath string, depending on the Gradio version).
    video_path = video_file if isinstance(video_file, str) else video_file.name
    cap = cv2.VideoCapture(video_path)
    success, frame = cap.read()
    cap.release()
    if not success:
        return "Could not read video"
    try:
        result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
        return result[0]['dominant_emotion'].capitalize()
    except Exception as e:
        return f"Error: {str(e)}"

def analyze_video_emotion(video_file):
    """Sample up to 60 frames and return the most common dominant emotion."""
    if video_file is None:
        return "No video uploaded"
    # gr.Video usually passes a filepath string; copy file-like uploads to a temp .mp4
    if isinstance(video_file, str):
        tmp_path = video_file
    else:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
            tmp.write(video_file.read())
            tmp_path = tmp.name
    cap = cv2.VideoCapture(tmp_path)
    emotions = []
    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or frame_count > 60:  # limit the analysis to 60 frames
            break
        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotions.append(result[0]['dominant_emotion'])
        except Exception:
            # Skip frames where DeepFace cannot produce a result
            pass
        frame_count += 1
    cap.release()
    if emotions:
        # Return the emotion that appeared most often across the analyzed frames
        return max(set(emotions), key=emotions.count)
    else:
        return "No face detected"

def analyze_post(text, video):
    """Combine text sentiment with the first-frame facial emotion estimate."""
    sentiment = analyze_text(text)
    emotion = analyze_video(video)
    return f"📝 Sentiment: {sentiment}\n🎥 Emotion: {emotion}"

interface = gr.Interface(
    fn=analyze_post,
    inputs=[
        gr.Textbox(label="Post Text", placeholder="Enter your message here"),
        gr.File(label="Upload video (.mp4)", file_types=[".mp4"])
    ],
    outputs="text",
    title="📱 Emotion & Sentiment Analyzer",
    description="Analyze the sentiment of a post's text and the facial emotion in an uploaded video, hosted as a Gradio app on Hugging Face Spaces."
)

interface.launch()
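# On a Hugging Face Space, this app assumes a requirements.txt alongside app.py
# listing roughly: gradio, opencv-python-headless, deepface, vaderSentiment.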