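"""Gradio app: AI Audio Emotion Detector.

Wraps the superb/wav2vec2-base-superb-er audio-classification model in a
small web UI: upload an audio file or record from the microphone, and the
app returns the model's emotion probabilities.
"""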
import os

import gradio as gr
import soundfile as sf
from transformers import pipeline

# Load the pretrained emotion-recognition model once at startup. If loading
# fails (e.g. no network access or a bad cache), remember the error message
# so the UI can surface it instead of crashing.
load_error = None
try:
    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
except Exception as e:
    classifier = None
    load_error = str(e)


def predict_emotion(audio_file):
    """Classify the emotion in an audio clip and return label probabilities."""
    if classifier is None:
        raise gr.Error(f"The AI model could not be loaded: {load_error}")
    if audio_file is None:
        raise gr.Error("No audio input provided.")

    temp_audio_path = None
    if isinstance(audio_file, str):
        # Normal case: the Audio component below uses type="filepath", so
        # Gradio hands us a path to a temporary file on disk.
        audio_path = audio_file
    elif isinstance(audio_file, tuple):
        # Defensive branch for type="numpy" inputs, where Gradio passes
        # (sample_rate, audio_array); write it out to a temporary WAV file.
        sample_rate, audio_array = audio_file
        temp_audio_path = "temp_audio_from_mic.wav"
        sf.write(temp_audio_path, audio_array, sample_rate)
        audio_path = temp_audio_path
    else:
        raise gr.Error(f"Invalid audio input format: {type(audio_file)}")

    try:
        # The SUPERB ER model predicts four emotion labels (neu/hap/ang/sad),
        # so top_k=5 simply returns all of them, sorted by score.
        results = classifier(audio_path, top_k=5)
        # gr.Label expects a mapping of label -> float confidence. Failures
        # are raised as gr.Error (shown as a popup in the UI) rather than
        # returned as error dicts, which gr.Label cannot render.
        return {item["label"]: round(item["score"], 3) for item in results}
    except Exception as e:
        raise gr.Error(f"An error occurred during prediction: {e}")
    finally:
        # Remove the temporary file created for numpy-style microphone input.
        if temp_audio_path and os.path.exists(temp_audio_path):
            os.remove(temp_audio_path)


iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Audio(
        sources=["microphone", "upload"],
        type="filepath",
        label="Upload Audio or Record with Microphone",
    ),
    outputs=gr.Label(num_top_classes=5, label="Emotion Probabilities"),
    title="AI Audio Emotion Detector",
    description="Upload an audio file or record your voice to detect emotions.",
)

if __name__ == "__main__":
    iface.queue().launch()
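
# Quick sanity check without launching the UI (the path is a placeholder and
# the scores shown are illustrative, not real model output):
#
#     predict_emotion("/path/to/clip.wav")
#     # -> {'ang': 0.72, 'neu': 0.14, 'sad': 0.09, 'hap': 0.05}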