import gradio as gr
import matplotlib.pyplot as plt
import whisper
from transformers import pipeline

# Pretrained models: SUPERB emotion recognition for vocal tone, Whisper "base" for transcription.
emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
whisper_model = whisper.load_model("base")
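
# Note (assumption, for readers): the SUPERB ER model typically returns short label codes with
# confidence scores, roughly [{"label": "hap", "score": 0.71}, ...] (values illustrative).
# Whisper decodes audio through ffmpeg, so ffmpeg must be available on PATH.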

def create_emotion_chart(labels, scores):
    emoji_map = {
        "hap": "😊 Happy", "sad": "😢 Sad", "neu": "😐 Neutral",
        "ang": "😠 Angry", "fea": "😨 Fear", "dis": "🤢 Disgust", "sur": "😮 Surprise"
    }
    color_map = {
        "hap": "#facc15", "sad": "#60a5fa", "neu": "#a1a1aa",
        "ang": "#ef4444", "fea": "#818cf8", "dis": "#14b8a6", "sur": "#f472b6"
    }
    # Look up colors from the raw label codes before remapping them to display names;
    # otherwise every bar silently falls back to the default color.
    colors = [color_map.get(label, "#60a5fa") for label in labels]
    labels = [emoji_map.get(label, label) for label in labels]

    fig, ax = plt.subplots(figsize=(5, 3.5))
    bars = ax.barh(labels, scores, color=colors, edgecolor="black", height=0.5)
    for bar, score in zip(bars, scores):
        ax.text(bar.get_width() + 0.02, bar.get_y() + bar.get_height() / 2,
                f"{score:.2f}", va='center', fontsize=10, color='black')
    ax.set_xlim(0, 1)
    ax.set_title("🎭 Emotion Confidence Scores", fontsize=13, pad=10)
    ax.invert_yaxis()
    ax.set_facecolor("#f9fafb")
    fig.patch.set_facecolor("#f9fafb")
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.tick_params(axis='x', colors='gray')
    ax.tick_params(axis='y', colors='gray')
    return fig
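
# Quick sanity check for the chart helper (illustrative labels and scores, not real model output):
#   create_emotion_chart(["hap", "neu", "ang", "sad"], [0.62, 0.21, 0.10, 0.07])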

def generate_next_moves(dominant_emotion, conf_score, transcript=""):
    suggestions = []
    harsh_words = ["bad", "ugly", "terrible", "hate", "worst"]
    # Flag a mismatch: upbeat delivery paired with harsh word choices.
    positive_tone_negative_words = (
        "happiness" in dominant_emotion
        and any(word in transcript.lower() for word in harsh_words)
    )

    if 'sadness' in dominant_emotion:
        suggestions.append("Your tone feels low; try lifting the pitch slightly to bring more warmth.")
        suggestions.append("Even if the words are positive, a brighter tone helps convey enthusiasm.")
    elif 'happiness' in dominant_emotion and conf_score >= 80:
        suggestions.append("Nice energy! Try modulating your tone even more for emphasis in key moments.")
        suggestions.append("Experiment with subtle emotional shifts as you speak for more depth.")
    elif 'neutral' in dominant_emotion:
        suggestions.append("Add inflection to break a monotone pattern, especially at the ends of sentences.")
        suggestions.append("Highlight your message by stressing emotionally important words.")
    elif conf_score < 50:
        suggestions.append("Try exaggerating vocal ups and downs when reading to unlock more expression.")
        suggestions.append("Slow down slightly and stretch certain words to vary your delivery.")
    else:
        suggestions.append("Keep practicing tone variation; you're building a solid base.")

    if positive_tone_negative_words:
        suggestions.append("Your tone was upbeat, but the word choices were harsh; aim to align both for better impact.")
    return "\n- " + "\n- ".join(suggestions)

def generate_personacoach_report(emotions, transcript):
    report = "## 📊 Your PersonaCoach Report\n---\n\n"
    report += "### 🎙️ What You Said:\n"
    report += f"> _{transcript.strip()}_\n\n"

    label_map = {
        'hap': '😊 happiness', 'sad': '😢 sadness', 'neu': '😐 neutral',
        'ang': '😠 anger', 'fea': '😨 fear', 'dis': '🤢 disgust', 'sur': '😮 surprise'
    }
    for e in emotions:
        e['emotion'] = label_map.get(e['label'], e['label'])

    scores = [e['score'] for e in emotions]
    top_score = max(scores)
    conf_score = int(top_score * 100)

    # The pipeline returns results sorted by score, so the first label above the
    # threshold is treated as the dominant emotion.
    emotion_labels = [e['emotion'] for e in emotions if e['score'] >= 0.2]
    dominant_emotion = emotion_labels[0] if emotion_labels else "neutral"

    report += f"### 🎯 Tone Strength:\n- Your tone scored **{conf_score}/100** in clarity.\n\n"
    report += "### 🗣️ Emotion & Delivery:\n"
    if emotion_labels:
        emo_str = ", ".join([f"{e['emotion']} ({e['score']:.2f})" for e in emotions])
        report += f"- Emotionally, your voice showed: {emo_str}\n"
    else:
        report += "- Your tone wasn't clearly expressive. Try reading with a bit more emphasis or emotion.\n"

    # Simple fluency check: count filler words relative to total word count.
    filler_words = ["um", "uh", "like", "you know", "so", "actually", "basically", "literally"]
    lowered = transcript.lower()
    words = lowered.split()
    total_words = len(words)
    # Multi-word fillers ("you know") are counted in the raw text; single-word fillers in the token list.
    filler_count = sum(
        lowered.count(fw) if " " in fw else words.count(fw)
        for fw in filler_words
    )
    filler_ratio = filler_count / total_words if total_words > 0 else 0

    report += "\n### 💬 Pausing Style:\n"
    report += f"- {filler_count} fillers out of {total_words} words.\n"
    if filler_ratio > 0.06:
        report += "- Try pausing instead of fillers.\n"
    elif filler_ratio > 0.03:
        report += "- A few fillers; consider tightening up delivery.\n"
    else:
        report += "- Strong fluency; great control.\n"

    report += "\n### 🧭 Next Moves:\n"
    report += generate_next_moves(dominant_emotion, conf_score, transcript)
    return report

def analyze_audio(audio_path):
    # Transcribe the recording, classify its emotional tone, then build the chart
    # and the written coaching report.
    result = whisper_model.transcribe(audio_path)
    transcript = result['text']
    emotion_results = emotion_classifier(audio_path)
    labels = [r['label'] for r in emotion_results]
    scores = [r['score'] for r in emotion_results]
    fig = create_emotion_chart(labels, scores)
    report = generate_personacoach_report(emotion_results, transcript)
    return transcript, fig, report

interface = gr.Interface(
    fn=analyze_audio,
    inputs=gr.Audio(type="filepath", label="Upload your voice (.wav only)"),
    outputs=[
        gr.Textbox(label="📝 Transcription"),
        gr.Plot(label="🎭 Emotion Chart"),
        gr.Markdown(label="📋 PersonaCoach Feedback")
    ],
    title="SPEAK - Speech Performance Evaluation and Affective Knowledge",
    description="Upload a voice sample and get coaching feedback on tone, emotion, and fluency."
)

interface.launch()
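
# Optional: interface.launch(share=True) creates a temporary public link, which can be
# handy when running on a remote machine or inside a notebook.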