yunusajib committed on
Commit
60d50c6
·
verified ·
1 Parent(s): 583277c

app.py update

Browse files
Files changed (1) hide show
  1. app.py +70 -99
app.py CHANGED
@@ -1,108 +1,79 @@
1
  import os
2
  import cv2
3
- import io
4
  import tempfile
5
  import pandas as pd
6
  import matplotlib.pyplot as plt
7
- from PIL import Image
8
- from deepface import DeepFace
9
  import gradio as gr
10
 
11
class EmotionDetector:
    """Analyzes facial emotions in sampled video frames using DeepFace."""

    def __init__(self):
        # The seven emotion labels DeepFace's emotion model reports.
        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

    def detect_emotions_video(self, video_path, sample_rate=30, max_size_mb=50):
        """Sample frames from a video and chart emotion confidence over time.

        Args:
            video_path: Path to the uploaded video file, or None.
            sample_rate: Analyze every Nth frame (default 30).
            max_size_mb: Reject files larger than this size in MB (default 50).

        Returns:
            (PIL.Image chart, markdown summary string) on success,
            (None, error message string) on failure.
        """
        try:
            if video_path is None:
                return None, "No video provided"

            file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
            if file_size_mb > max_size_mb:
                return None, f"Video too large ({file_size_mb:.2f} MB). Limit: {max_size_mb} MB."

            cap = cv2.VideoCapture(video_path)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            # Guard against unreadable videos AND a reported FPS of 0,
            # which would divide by zero when computing timestamps below.
            if frame_count == 0 or fps <= 0:
                cap.release()
                return None, "Invalid video"

            emotions_over_time = []
            try:
                for i in range(0, frame_count, sample_rate):
                    cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                    ret, frame = cap.read()
                    if not ret:
                        continue
                    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    pil_img = Image.fromarray(rgb)

                    # DeepFace.analyze expects an image path, so round-trip
                    # through a temp file; delete it even if analyze() raises.
                    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmpfile:
                        pil_img.save(tmpfile.name)
                    try:
                        # NOTE(fix): the previous 'prog_backend' keyword is
                        # not a DeepFace.analyze parameter and raised a
                        # TypeError on every frame; removed.
                        result = DeepFace.analyze(
                            img_path=tmpfile.name,
                            actions=['emotion'],
                            enforce_detection=False,
                            detector_backend='opencv'
                        )
                        # DeepFace may return a list (one entry per face) or
                        # a single dict depending on version.
                        emotions = result[0]['emotion'] if isinstance(result, list) else result['emotion']
                        emotions['timestamp'] = i / fps
                        emotions_over_time.append(emotions)
                    finally:
                        os.unlink(tmpfile.name)
            finally:
                # Always release the capture, even on unexpected errors.
                cap.release()

            if not emotions_over_time:
                return None, "No emotions detected."

            df = pd.DataFrame(emotions_over_time)
            plt.figure(figsize=(12, 6))
            for emotion in self.emotions:
                if emotion in df.columns:
                    plt.plot(df['timestamp'], df[emotion], label=emotion)
            plt.xlabel("Time (s)")
            plt.ylabel("Emotion Confidence")
            plt.title("Emotion Trends Over Time")
            plt.legend()
            plt.grid(True)

            # Render the figure into an in-memory PNG and hand it back as a
            # PIL image so Gradio can display it directly.
            buf = io.BytesIO()
            plt.savefig(buf, format='png')
            buf.seek(0)
            chart = Image.open(buf)
            plt.close()

            avg = df[self.emotions].mean().sort_values(ascending=False)
            summary = "**Video Analysis Complete**\n"
            summary += f"**Frames Analyzed:** {len(df)}\n"
            summary += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
            summary += "**Average Emotions:**\n"
            for e, v in avg.items():
                summary += f"• {e.title()}: {v:.1f}%\n"

            return chart, summary

        except Exception as e:
            # Top-level boundary: surface the error message to the UI
            # rather than crashing the Gradio worker.
            return None, f"Error: {str(e)}"
88
-
89
def create_interface():
    """Build the Gradio UI wired to a fresh EmotionDetector instance."""
    detector = EmotionDetector()

    def process(video, rate):
        # Thin adapter: forward the two UI inputs to the detector.
        return detector.detect_emotions_video(video, rate)

    video_input = gr.Video(label="Upload Video")
    rate_input = gr.Slider(minimum=1, maximum=60, step=1, value=30, label="Sample Rate")
    chart_output = gr.Image(type="pil", label="Emotion Chart")
    summary_output = gr.Textbox(label="Summary")

    return gr.Interface(
        fn=process,
        inputs=[video_input, rate_input],
        outputs=[chart_output, summary_output],
        title="Emotion Detection from Video",
        description="Upload a video to analyze facial emotions using DeepFace."
    )


if __name__ == "__main__":
    create_interface().launch()
 
1
  import os
2
  import cv2
 
3
  import tempfile
4
  import pandas as pd
5
  import matplotlib.pyplot as plt
6
+ from deepface import DeepFace
 
7
  import gradio as gr
8
 
9
def analyze_video(video_path):
    """Sample a video roughly every 2 seconds, run DeepFace emotion
    analysis on each sampled frame, and produce a bar chart plus a
    markdown summary.

    Args:
        video_path: Path to the uploaded video file, or None.

    Returns:
        (chart_path, summary): path to the saved PNG chart (None when
        nothing could be analyzed) and a markdown summary string.
    """
    if not video_path:
        return None, "No video provided."

    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    # NOTE(fix): broken/odd containers can report an FPS of 0 (or NaN),
    # which made `int(fps * 2)` a zero modulus below. Fall back to a sane
    # default step and clamp to at least 1.
    step = int(fps * 2) if fps and fps > 0 else 30  # sample every ~2 seconds
    step = max(step, 1)

    frames = []
    count = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if count % step == 0:
            frames.append(frame)
        count += 1
    cap.release()

    emotions_summary = []
    for i, frame in enumerate(frames):
        # DeepFace.analyze expects an image path, so write the frame to a
        # temp file; delete it in `finally` even when analysis fails.
        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmpfile:
            cv2.imwrite(tmpfile.name, frame)
        try:
            # NOTE(fix): the previous 'prog_backend' keyword is not a
            # DeepFace.analyze parameter and raised a TypeError; removed.
            result = DeepFace.analyze(
                img_path=tmpfile.name,
                actions=['emotion'],
                enforce_detection=False,
                detector_backend='opencv'
            )
            # DeepFace may return a list (one entry per face) or a dict.
            if isinstance(result, list):
                emotions_summary.append(result[0]['emotion'])
            else:
                emotions_summary.append(result['emotion'])
        except Exception as e:
            # Best-effort: skip frames DeepFace cannot process.
            print(f"Frame {i} skipped: {e}")
        finally:
            os.unlink(tmpfile.name)

    # NOTE(fix): if every frame failed (or the video had no frames),
    # pd.DataFrame([]).mean() yields nothing useful — report gracefully.
    if not emotions_summary:
        return None, "No emotions could be detected in this video."

    df = pd.DataFrame(emotions_summary)
    emotion_means = df.mean().sort_values(ascending=False)

    # Plot average emotion probabilities as a bar chart.
    plt.figure(figsize=(10, 5))
    emotion_means.plot(kind='bar', color='skyblue')
    plt.title("Average Emotions in Video")
    plt.ylabel("Probability")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig("emotion_chart.png")
    plt.close()

    # NOTE(fix): duration is now derived from the real frame count and FPS
    # when available, instead of assuming exactly 2 s between samples.
    if fps and fps > 0:
        duration = round(count / fps, 1)
    else:
        duration = round(len(frames) * 2.0, 1)

    summary = "**Video Analysis Complete**\n"
    summary += f"**Frames Analyzed:** {len(frames)}\n"
    summary += f"**Duration:** {duration} seconds\n\n"
    summary += "**Average Emotions:**\n"
    for emotion, value in emotion_means.items():
        summary += f"• {emotion.capitalize()}: {value:.1f}%\n"

    return "emotion_chart.png", summary
66
+
67
# Build the Gradio app: one video input mapped to a chart image and a
# markdown summary produced by analyze_video.
video_input = gr.Video(label="Upload a Video")
chart_output = gr.Image(label="Emotion Chart")
summary_output = gr.Markdown(label="Emotion Summary")

demo = gr.Interface(
    fn=analyze_video,
    inputs=video_input,
    outputs=[chart_output, summary_output],
    title="Emotion Recognition from Video",
    description="Upload a short video. The app analyzes emotions every 2 seconds using DeepFace and PyTorch."
)


if __name__ == "__main__":
    demo.launch()