update app v3
Changes due to incompatibility between DeepFace and TensorFlow
app.py
CHANGED
@@ -5,12 +5,25 @@ import pandas as pd
 from datetime import datetime
 import time
 from transformers import pipeline
-from deepface import DeepFace
 import librosa
 from python_speech_features import mfcc
+import onnxruntime as ort
+import requests
+import os
+
+# Download emotion recognition ONNX model
+MODEL_URL = "https://github.com/onnx/models/raw/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.onnx"
+MODEL_PATH = "emotion-ferplus-8.onnx"
+
+if not os.path.exists(MODEL_PATH):
+    response = requests.get(MODEL_URL)
+    with open(MODEL_PATH, "wb") as f:
+        f.write(response.content)
 
 # Initialize models
 voice_classifier = pipeline("audio-classification", model="superb/hubert-base-superb-er")
+emotion_session = ort.InferenceSession(MODEL_PATH)
+emotion_labels = ['neutral', 'happy', 'surprise', 'sad', 'angry', 'disgust', 'fear', 'contempt']
 
 # Global variables to store results
 emotion_history = []
@@ -18,22 +31,38 @@ current_emotions = {"face": "neutral", "voice": "neutral"}
 last_update_time = time.time()
 
 def analyze_face(frame):
-    """Analyze facial expressions in the frame using DeepFace"""
+    """Analyze facial expressions in the frame using ONNX model"""
     try:
-        #
-
-
-
-
-
-
-
-
-        return dominant_emotion, emotions
-
+        # Preprocess frame
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+
+        if len(faces) > 0:
+            x, y, w, h = faces[0]
+            face_roi = gray[y:y+h, x:x+w]
+            face_roi = cv2.resize(face_roi, (64, 64))
+            face_roi = face_roi.astype('float32') / 255.0
+            face_roi = np.expand_dims(face_roi, axis=0)
+            face_roi = np.expand_dims(face_roi, axis=0)
+
+            # Run inference
+            input_name = emotion_session.get_inputs()[0].name
+            output_name = emotion_session.get_outputs()[0].name
+            results = emotion_session.run([output_name], {input_name: face_roi})[0]
+
+            # Get emotion probabilities
+            emotion_probs = results[0]
+            dominant_emotion = emotion_labels[np.argmax(emotion_probs)]
+
+            # Create emotion dictionary
+            emotions = {label: float(prob) for label, prob in zip(emotion_labels, emotion_probs)}
+            return dominant_emotion, emotions
+
+        return "neutral", {label: 0.0 for label in emotion_labels}
     except Exception as e:
         print(f"Face analysis error: {e}")
-        return "neutral", {
+        return "neutral", {label: 0.0 for label in emotion_labels}
 
 def analyze_voice(audio):
     """Analyze voice tone from audio"""
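The download block above writes response.content straight to disk with no status check: if the GET fails, an HTML error page ends up in emotion-ferplus-8.onnx and ort.InferenceSession only fails later, with a much less obvious error. Below is a more defensive sketch of the same step, using only standard requests features (raise_for_status, streamed iter_content); the timeout and chunk size are arbitrary choices, not values from this commit.

import os
import requests

MODEL_URL = "https://github.com/onnx/models/raw/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.onnx"
MODEL_PATH = "emotion-ferplus-8.onnx"

if not os.path.exists(MODEL_PATH):
    # Fail loudly on HTTP errors instead of silently writing an
    # error page into the .onnx file.
    response = requests.get(MODEL_URL, stream=True, timeout=60)
    response.raise_for_status()
    with open(MODEL_PATH, "wb") as f:
        # Stream in 1 MiB chunks rather than buffering the whole
        # model in memory.
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)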