import gradio as gr
from transformers import pipeline
# Load the audio classification model
pipe = pipeline("audio-classification", model="dima806/english_accents_classification")
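# The pipeline takes a path to an audio file and returns a list of
# {"label": ..., "score": ...} dicts, sorted by confidence (highest first)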
# Define the inference function with styled, color-coded output
def classify_accent(audio):
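    # "audio" is a file path string, since the Gradio Audio input below uses type="filepath"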
    try:
        result = pipe(audio)
        if not result:
            return "<p>⚠️ No prediction returned. Please try a different audio file.</p>"
        # Start HTML table with styling
        table = """
        <table style="border-collapse: collapse; width: 100%;">
            <tr>
                <th style="border: 1px solid #ddd; padding: 8px; text-align: left;">Accent</th>
                <th style="border: 1px solid #ddd; padding: 8px; text-align: left;">Confidence</th>
            </tr>
        """
        for i, r in enumerate(result):
            label = r['label'].capitalize()
            score = f"{r['score'] * 100:.2f}%"
            if i == 0:
                # Highlight top accent with green background and bold text
                row = f"""
            <tr style="background-color: #d4edda; font-weight: bold;">
                <td style="border: 1px solid #ddd; padding: 8px;">{label}</td>
                <td style="border: 1px solid #ddd; padding: 8px;">{score}</td>
            </tr>
            """
            else:
                row = f"""
            <tr>
                <td style="border: 1px solid #ddd; padding: 8px;">{label}</td>
                <td style="border: 1px solid #ddd; padding: 8px;">{score}</td>
            </tr>
            """
            table += row
table += "
"
        top_result = result[0]
        return f"""
        <h3>🎤 Predicted Accent: {top_result['label'].capitalize()}</h3>
        {table}
        """
    except Exception as e:
        return (
            f"<p>⚠️ Error: {str(e)}</p>"
            "<p>Please upload a valid English audio file (e.g., .wav, .mp3).</p>"
        )
# Create and launch the Gradio app
gr.Interface(
    fn=classify_accent,
    inputs=gr.Audio(type="filepath", label="🎙 Record or Upload English Audio"),
    outputs=gr.HTML(),  # Important: use HTML output here to render the table properly
    title="🌍 English Accent Classifier",
    description=(
        "Upload or record an English audio sample to detect the speaker's accent.\n\n"
        "**Supported accents:** American, British, Indian, African, Australian.\n\n"
        "**Model:** [dima806/english_accents_classification](https://huggingface.co/dima806/english_accents_classification)\n\n"
        "**Dataset:** https://www.kaggle.com/code/dima806/common-voice-accent-classification"
    ),
    flagging_mode="never",
    theme="default",
).launch(share=True)
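# Launching starts a local server; share=True also creates a temporary public link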