Update app.py
app.py CHANGED
@@ -149,6 +149,65 @@ interface = gr.Interface(
         gr.Textbox(label="Post Text", placeholder="Enter your message here"),
         gr.File(label="Upload video (.mp4)", file_types=[".mp4"])
     ],
+import gradio as gr
+from deepface import DeepFace
+from transformers import pipeline
+import tempfile
+import cv2
+import moviepy.editor as mp
+
+def analyze_text(text):
+    classifier = pipeline("sentiment-analysis")
+    return classifier(text)[0]['label']
+
+def analyze_video_emotion(video_file):
+    try:
+        # Save the uploaded video to a temp file
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
+            tmp.write(video_file.read())
+            tmp_path = tmp.name
+
+        # Extract frames using MoviePy (more reliable than OpenCV alone)
+        video = mp.VideoFileClip(tmp_path)
+        frames = list(video.iter_frames())
+
+        emotions = []
+        for frame in frames[:60]:  # Limit to first 60 frames
+            try:
+                # Use DeepFace for emotion detection
+                result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
+                emotions.append(result[0]['dominant_emotion'])
+            except Exception as e:
+                print("Error analyzing frame:", e)
+
+        if emotions:
+            # Return the most common emotion
+            return max(set(emotions), key=emotions.count)
+        else:
+            return "No face detected"
+
+    except Exception as e:
+        print("Error processing video:", e)
+        return "Error processing video file"
+
+def process_all(text_input, video_input):
+    text_result = analyze_text(text_input)
+    video_result = analyze_video_emotion(video_input)
+    return f"Text Sentiment: {text_result}\nFacial Emotion: {video_result}"
+
+iface = gr.Interface(
+    fn=process_all,
+    inputs=[
+        gr.Textbox(label="Enter Social Media Text"),
+        gr.Video(label="Upload a Video Clip")
+    ],
+    outputs="text",
+    title="Emotion & Sentiment Decoder",
+    description="Analyzes social media text & facial expressions from video."
+)
+
+iface.launch()
+
     outputs="text",
     title="📱 Emotion & Sentiment Analyzer",
     description="Analyze text sentiment and facial emotion from video. No re-running needed. Permanent on Hugging Face."
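
Note on the added analyze_video_emotion: a gr.Video input normally passes the callback a filepath string rather than an open file object, so the video_file.read() / tempfile step may not be needed. The following is only an illustrative, path-based sketch of the same frame-sampling idea (the filepath assumption and the simplified error handling are not part of this commit):

import moviepy.editor as mp
from deepface import DeepFace

def analyze_video_emotion(video_path):
    # Assumption: video_path is the filepath string that gr.Video provides by default
    if not video_path:
        return "No video provided"
    try:
        clip = mp.VideoFileClip(video_path)
        emotions = []
        for i, frame in enumerate(clip.iter_frames()):
            if i >= 60:  # limit analysis to the first 60 frames, as in the commit
                break
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotions.append(result[0]['dominant_emotion'])
        clip.close()
        # Most common dominant emotion across sampled frames, or a fallback message
        return max(set(emotions), key=emotions.count) if emotions else "No face detected"
    except Exception as e:
        print("Error processing video:", e)
        return "Error processing video file"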