import gradio as gr
from transformers import pipeline
import soundfile as sf
import os

# Load the speech-emotion-recognition pipeline once at startup.
try:
    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
    load_error = None
except Exception as e:
    # Python clears the except variable when the block exits, so capture the
    # message now for later error reporting.
    classifier = None
    load_error = str(e)

def predict_emotion(audio_file):
    if classifier is None:
        # gr.Label expects label -> score mappings, so surface failures via
        # gr.Error rather than returning a dict of strings.
        raise gr.Error(f"The AI model could not be loaded: {load_error}")
    if audio_file is None:
        raise gr.Error("No audio input provided.")

    # With type="filepath" Gradio passes a path string; the tuple branch is a
    # fallback for (sample_rate, numpy_array) input from type="numpy".
    if isinstance(audio_file, str):
        audio_path = audio_file
    elif isinstance(audio_file, tuple):
        sample_rate, audio_array = audio_file
        temp_audio_path = "temp_audio_from_mic.wav"
        sf.write(temp_audio_path, audio_array, sample_rate)
        audio_path = temp_audio_path
    else:
        raise gr.Error(f"Invalid audio input format: {type(audio_file)}")

    try:
        results = classifier(audio_path, top_k=5)
        # Map each predicted label to its rounded score for gr.Label.
        return {item["label"]: round(item["score"], 3) for item in results}
    except Exception as e:
        raise gr.Error(f"An error occurred during prediction: {str(e)}")
    finally:
        # Remove the temporary file written for raw microphone input, if any.
        if "temp_audio_path" in locals() and os.path.exists(temp_audio_path):
            os.remove(temp_audio_path)
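
# A minimal sketch of calling the handler outside the UI (the file name is a
# hypothetical example; any audio file the pipeline's decoder can read works):
# print(predict_emotion("sample.wav"))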

iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Audio(
        sources=["microphone", "upload"],
        type="filepath",
        label="Upload Audio or Record with Microphone",
    ),
    outputs=gr.Label(num_top_classes=5, label="Emotion Probabilities"),
    title="AI Audio Emotion Detector",
    description="Upload an audio file or record your voice to detect emotions.",
)

if __name__ == "__main__":
    iface.queue().launch()
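
# A minimal sketch of running this Space locally, assuming the file is saved
# as app.py (dependency names are the usual ones for this stack, not pinned
# by the source):
#   pip install gradio transformers torch soundfile
#   python app.py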