File size: 2,917 Bytes
f0f3558
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c0cb5bb
 
 
 
 
 
 
 
 
 
 
 
 
 
f0f3558
 
 
 
cef2d02
f0f3558
 
 
 
 
 
 
f148efd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f0f3558
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cef2d02
 
f0f3558
 
 
cef2d02
f0f3558
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import gradio as gr
import cv2
from deepface import DeepFace
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import tempfile

# Single module-level VADER analyzer, shared by all requests so the lexicon
# is loaded once at import time rather than per call.
analyzer = SentimentIntensityAnalyzer()

def analyze_text(text):
    """Classify *text* as positive, negative, or neutral.

    Uses VADER's compound score with the conventional +/-0.05 thresholds;
    anything in between is treated as neutral.
    """
    compound = analyzer.polarity_scores(text)["compound"]
    if compound >= 0.05:
        return "Positive 😊"
    if compound <= -0.05:
        return "Negative 😠"
    return "Neutral 😐"

def process_all(text, video):
    """Run text-sentiment and video-emotion analysis and combine the results.

    Bug fix: the original called ``analyze_sentiment``, a name that does not
    exist anywhere in this file (the text analyzer is ``analyze_text``), so
    every invocation raised NameError.
    """
    text_sentiment = analyze_text(text)
    video_emotion = analyze_video_emotion(video)
    return f"Text Sentiment: {text_sentiment}\nFacial Emotion: {video_emotion}"

# NOTE(review): this is one of TWO Gradio apps in this file (a second
# Interface is built at the bottom). gr.Interface.launch() blocks by default,
# so when this module runs as a script, nothing below this call executes —
# including the definition of analyze_video_emotion, which process_all needs.
# One of the two apps should be removed; confirm which is the intended one.
iface = gr.Interface(
    fn=process_all,
    inputs=[gr.Textbox(label="Social Media Post"), gr.Video(label="Upload Video")],
    outputs="text",
    title="Emotion & Sentiment Analyzer"
)

iface.launch()

def analyze_video(video_file):
    """Return the dominant facial emotion from the first frame of a video.

    Bug fix: the original read one frame, released the capture, and then fell
    off the end of the function — implicitly returning None. The frame check
    and DeepFace analysis (previously stranded as unreachable code elsewhere
    in the file) are restored here.

    Returns a capitalized emotion label, or a human-readable error string.
    """
    if video_file is None:
        return "No video uploaded"

    # Persist the upload to disk so OpenCV can open it by path.
    # NOTE(review): assumes video_file is a file-like object with .read();
    # with gr.File inputs this depends on Gradio's `type` setting — confirm.
    temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    with open(temp_path, "wb") as f:
        f.write(video_file.read())

    cap = cv2.VideoCapture(temp_path)
    success, frame = cap.read()
    cap.release()

    if not success:
        return "Could not read video"

    try:
        # enforce_detection=False keeps DeepFace from raising when no face
        # is found; failures are reported as a string instead of crashing.
        result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
        return result[0]['dominant_emotion'].capitalize()
    except Exception as e:
        return f"Error: {str(e)}"
    
def analyze_video_emotion(video_file):
    """Return the most frequent facial emotion across the first frames of a video.

    Samples up to 61 frames, runs DeepFace emotion analysis on each, and
    returns the majority emotion, or "No face detected" if nothing was
    analyzable.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to ``except Exception``, and an unreachable
    trailing code fragment referencing undefined names (``success``,
    ``frame``) after the final return has been removed.
    """
    # Save the uploaded video to a temp file so OpenCV can read it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        tmp.write(video_file.read())
        tmp_path = tmp.name

    cap = cv2.VideoCapture(tmp_path)
    emotions = []
    frame_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or frame_count > 60:  # Limit to 60 frames max
            break
        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotions.append(result[0]['dominant_emotion'])
        except Exception:
            # Best-effort: skip frames DeepFace cannot analyze.
            pass
        frame_count += 1

    cap.release()

    if emotions:
        # Majority vote across all analyzed frames.
        return max(set(emotions), key=emotions.count)
    return "No face detected"

def analyze_post(text, video):
    """Analyze a post's text sentiment and its video's facial emotion.

    Returns a two-line summary string combining both results.
    """
    # Evaluate text first, then video — same order as the original locals.
    return (
        f"πŸ“ Sentiment: {analyze_text(text)}\n"
        f"πŸŽ₯ Emotion: {analyze_video(video)}"
    )

# NOTE(review): this is the SECOND Gradio app in this file; the first one
# launches (and blocks) earlier, so this code is unreachable when the module
# runs top to bottom. Consolidate to a single Interface.
# NOTE(review): gr.File hands the callback a file object/path depending on
# Gradio version; analyze_post -> analyze_video calls .read() on it — verify
# against the installed Gradio's gr.File `type` default.
interface = gr.Interface(
    fn=analyze_post,
    inputs=[
        gr.Textbox(label="Post Text", placeholder="Enter your message here"),
        gr.File(label="Upload video (.mp4)", file_types=[".mp4"])
    ],
    outputs="text",
    title="πŸ“± Emotion & Sentiment Analyzer",
    description="Analyze text sentiment and facial emotion from video. No re-running needed. Permanent on Hugging Face."
)

interface.launch()