Spaces: Runtime error
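The commit below adds app.py, which loads voice_model.joblib at import time, but no such artifact is part of the commit; if the file is absent from the repo, the Space crashes on startup. A defensive version of the loading step, as a hypothetical sketch (not in the Space):

# Hypothetical guard: fail with an actionable message instead of an
# unhandled FileNotFoundError when the model artifact was never uploaded.
import os
import joblib

MODEL_PATH = "voice_model.joblib"
if not os.path.exists(MODEL_PATH):
    raise RuntimeError(
        f"{MODEL_PATH} not found; upload the trained model to the Space repo "
        "or train one first (see the training sketch at the end)."
    )
voice_clf = joblib.load(MODEL_PATH)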
requirements.txt
gradio==3.37.0
librosa
numpy
scikit-learn
deepface
opencv-python
joblib
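The Gradio pin matters: app.py below uses gr.Image(source="webcam"), and Gradio 4 replaced that parameter with sources, so the app only runs against the 3.x API. A quick local check under the same pin, as a hypothetical sketch (smoke_test.py is not part of this commit):

# smoke_test.py: hypothetical check that the pinned Gradio exposes the 3.x
# API that app.py depends on.
import inspect

import gradio as gr

assert gr.__version__ == "3.37.0", gr.__version__
# Gradio 4 replaced gr.Image's `source` parameter with `sources`,
# so gr.Image(source="webcam") only works on 3.x.
assert "source" in inspect.signature(gr.Image.__init__).parameters
print("Gradio 3.x API available")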
app.py
ADDED
import gradio as gr
import librosa
import numpy as np
import joblib
from deepface import DeepFace

# scikit-learn (see requirements.txt) must still be installed so joblib can
# unpickle the RandomForestClassifier, but no explicit import is needed here.

# Assumes the voice model has already been trained and saved as voice_model.joblib
voice_clf = joblib.load("voice_model.joblib")

# Text analysis: keyword matching on Chinese emotion words
def analyze_text(text):
    if any(w in text for w in ["開心", "快樂"]): return "happy"
    if any(w in text for w in ["生氣", "憤怒"]): return "angry"
    if any(w in text for w in ["傷心", "難過", "哭"]): return "sad"
    if any(w in text for w in ["驚", "意外"]): return "surprise"
    if any(w in text for w in ["怕", "恐懼"]): return "fear"
    return "neutral"

# Speech analysis: mean MFCC features fed to the pre-trained classifier
def analyze_audio(path):
    y_audio, sr = librosa.load(path, sr=None)
    mfccs = np.mean(librosa.feature.mfcc(y=y_audio, sr=sr, n_mfcc=13).T, axis=0).reshape(1, -1)
    return voice_clf.predict(mfccs)[0]

# Face analysis: DeepFace emotion detection on a single image
def analyze_face(img):
    # Recent DeepFace versions return a list of result dicts, one per face.
    res = DeepFace.analyze(img, actions=['emotion'], enforce_detection=False)
    return res[0]['dominant_emotion']

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Multimodal Emotion Analysis")
    with gr.Tab("📝 Text"):
        t = gr.Textbox(placeholder="Enter Chinese text…")
        bt = gr.Button("Analyze text")
        out_t = gr.Textbox()
        bt.click(analyze_text, inputs=t, outputs=out_t)
    with gr.Tab("🎤 Speech"):
        a = gr.Audio(type="filepath")
        ba = gr.Button("Analyze speech")
        out_a = gr.Textbox()
        ba.click(analyze_audio, inputs=a, outputs=out_a)
    with gr.Tab("📷 Face"):
        im = gr.Image(source="webcam")
        bi = gr.Button("Analyze expression")
        out_i = gr.Textbox()
        bi.click(analyze_face, inputs=im, outputs=out_i)

# Start the Gradio server; without this call the script exits immediately
# and the Space reports a runtime error.
demo.launch()
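app.py assumes a pre-trained voice_model.joblib. A minimal sketch of how a compatible artifact could be produced, assuming labeled WAV clips laid out as data/<emotion_label>/*.wav (the layout, the file name train_voice_model.py, and the hyperparameters are assumptions, not part of this Space):

# train_voice_model.py: hypothetical training script for the artifact that
# app.py loads; trains a random forest on mean MFCC features per clip.
import glob
import os

import joblib
import librosa
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def mfcc_features(path):
    # Must mirror analyze_audio() in app.py: 13 MFCCs averaged over time.
    y, sr = librosa.load(path, sr=None)
    return np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13).T, axis=0)

X, labels = [], []
for wav in glob.glob("data/*/*.wav"):
    X.append(mfcc_features(wav))
    labels.append(os.path.basename(os.path.dirname(wav)))  # folder name is the label

clf = RandomForestClassifier(n_estimators=200, random_state=0)
clf.fit(np.array(X), labels)
joblib.dump(clf, "voice_model.joblib")

The feature extraction deliberately mirrors analyze_audio(): a model trained on different features of the same dimensionality would silently mispredict at inference time without raising an error.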