asad231 committed
Commit 2daa6b4 · verified · 1 Parent(s): a50d8ba

Create app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+ import librosa
+
+ # 1. Load the trained model (must be in the same folder, named model.h5)
+ model = tf.keras.models.load_model("model.h5")
+
+ # 2. Define labels & emojis (order must match the model's output classes)
+ EMOTIONS = ["Neutral", "Calm", "Happy", "Sad", "Angry", "Fearful", "Disgust", "Surprised"]
+ EMOJI_MAP = {
+     "Neutral": "😐", "Calm": "😌", "Happy": "😄", "Sad": "😢",
+     "Angry": "😠", "Fearful": "😨", "Disgust": "🤢", "Surprised": "😲"
+ }
+
+ def predict_emotion(audio_path):
+     # Load the clip and average its MFCCs into a single 40-dim feature vector
+     y, sr = librosa.load(audio_path, sr=22050)
+     mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
+     features = np.mean(mfcc.T, axis=0).reshape(1, -1)
+
+     # Run the model and map the top-scoring class to its label and emoji
+     preds = model.predict(features)
+     idx = np.argmax(preds, axis=1)[0]
+     label = EMOTIONS[idx]
+     emoji = EMOJI_MAP[label]
+     return f"{label} {emoji}"
+
+ # 3. Build the Gradio interface (Gradio 4.x replaced `source="upload"` with `sources=["upload"]`)
+ demo = gr.Interface(
+     fn=predict_emotion,
+     inputs=gr.Audio(sources=["upload"], type="filepath", label="Upload a .wav file"),
+     outputs=gr.Text(label="Predicted Emotion"),
+     title="🎤 Voice Emotion AI",
+     description="Upload a voice clip (.wav) to detect the speaker’s emotion."
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
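
Note: the preprocessing can be sanity-checked without the trained model by running the same MFCC pipeline on a synthetic tone and confirming the feature shape matches what model.h5 expects. This is a minimal sketch under that assumption; the 440 Hz tone is illustrative and not part of the commit:

import numpy as np
import librosa

sr = 22050
t = np.linspace(0, 1.0, sr, endpoint=False)
y = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # synthetic 1-second 440 Hz tone as a stand-in for a .wav

mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)  # 40 MFCCs per frame, shape (40, n_frames)
features = np.mean(mfcc.T, axis=0).reshape(1, -1)   # average over time -> shape (1, 40)
print(features.shape)  # model.h5 must accept (1, 40); reshape further if it expects e.g. (1, 40, 1)

If this app runs as a Hugging Face Space, the repo will typically also need a requirements.txt listing tensorflow, librosa, and numpy alongside app.py; locally, `python app.py` starts the Gradio server.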