Benitha committed
Commit 06ba5da · verified · 1 Parent(s): 701deaf

Upload 2 files

Files changed (2)
  1. app.py +133 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,133 @@
+
+ import gradio as gr
+ from transformers import pipeline
+ import whisper
+ import matplotlib.pyplot as plt
+
+ # Load models once at startup: a speech-emotion classifier and Whisper for transcription
+ emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
+ whisper_model = whisper.load_model("base")
+
+ # Chart generator
+ def create_emotion_chart(labels, scores):
+     emoji_map = {
+         "hap": "😊 Happy", "sad": "😔 Sad", "neu": "😐 Neutral",
+         "ang": "😠 Angry", "fea": "😨 Fear", "dis": "🤢 Disgust", "sur": "😮 Surprise"
+     }
+     color_map = {
+         "hap": "#facc15", "sad": "#60a5fa", "neu": "#a1a1aa",
+         "ang": "#ef4444", "fea": "#818cf8", "dis": "#14b8a6", "sur": "#f472b6"
+     }
+     # Look up colors before remapping labels to emoji strings; otherwise every
+     # lookup misses and falls through to the default color
+     colors = [color_map.get(label, "#60a5fa") for label in labels]
+     labels = [emoji_map.get(label, label) for label in labels]
+
+     fig, ax = plt.subplots(figsize=(5, 3.5))
+     bars = ax.barh(labels, scores, color=colors, edgecolor="black", height=0.5)
+     for bar, score in zip(bars, scores):
+         ax.text(bar.get_width() + 0.02, bar.get_y() + bar.get_height() / 2,
+                 f"{score:.2f}", va='center', fontsize=10, color='black')
+     ax.set_xlim(0, 1)
+     ax.set_title("🎭 Emotion Confidence Scores", fontsize=13, pad=10)
+     ax.invert_yaxis()
+     ax.set_facecolor("#f9fafb")
+     fig.patch.set_facecolor("#f9fafb")
+     for spine in ax.spines.values():
+         spine.set_visible(False)
+     ax.tick_params(axis='x', colors='gray')
+     ax.tick_params(axis='y', colors='gray')
+     return fig
+
+ def generate_next_moves(dominant_emotion, conf_score, transcript=""):
+     suggestions = []
+     harsh_words = ["bad", "ugly", "terrible", "hate", "worst"]
+     # Flag a mismatch between an upbeat tone and negative word choices
+     positive_tone_negative_words = any(word in transcript.lower() for word in harsh_words) if "happiness" in dominant_emotion else False
+
+     if 'sadness' in dominant_emotion:
+         suggestions.append("Your tone feels low - try lifting the pitch slightly to bring more warmth.")
+         suggestions.append("Even if the words are positive, a brighter tone helps convey enthusiasm.")
+     elif 'happiness' in dominant_emotion and conf_score >= 80:
+         suggestions.append("Nice energy! Try modulating your tone even more for emphasis in key moments.")
+         suggestions.append("Experiment with subtle emotional shifts as you speak for more depth.")
+     elif 'neutral' in dominant_emotion:
+         suggestions.append("Add inflection to break a monotone pattern - especially at the ends of sentences.")
+         suggestions.append("Highlight your message by stressing emotionally important words.")
+     elif conf_score < 50:
+         suggestions.append("Try exaggerating vocal ups and downs when reading to unlock more expression.")
+         suggestions.append("Slow down slightly and stretch certain words to vary your delivery.")
+     else:
+         suggestions.append("Keep practicing tone variation - you're building a solid base.")
+
+     if positive_tone_negative_words:
+         suggestions.append("Your tone was upbeat, but the word choices were harsh - aim to align both for better impact.")
+     return "\n- " + "\n- ".join(suggestions)
+
+ def generate_personacoach_report(emotions, transcript):
+     report = "## 📝 Your PersonaCoach Report\n---\n\n"
+     report += "### 🗒️ What You Said:\n"
+     report += f"> _{transcript.strip()}_\n\n"
+
+     label_map = {
+         'hap': '😊 happiness', 'sad': '😔 sadness', 'neu': '😐 neutral',
+         'ang': '😠 anger', 'fea': '😨 fear', 'dis': '🤢 disgust', 'sur': '😮 surprise'
+     }
+     for e in emotions:
+         e['emotion'] = label_map.get(e['label'], e['label'])
+
+     scores = [e['score'] for e in emotions]
+     top_score = max(scores)
+     conf_score = int(top_score * 100)
+
+     # Pipeline results arrive sorted by score, so the first label above threshold is dominant
+     emotion_labels = [e['emotion'] for e in emotions if e['score'] >= 0.2]
+     dominant_emotion = emotion_labels[0] if emotion_labels else "neutral"
+
+     report += f"### 🎯 Tone Strength:\n- Your tone scored **{conf_score}/100** in clarity.\n\n"
+     report += "### 🗣️ Emotion & Delivery:\n"
+     if emotion_labels:
+         emo_str = ", ".join([f"{e['emotion']} ({e['score']:.2f})" for e in emotions])
+         report += f"- Emotionally, your voice showed: {emo_str}\n"
+     else:
+         report += "- Your tone wasn't clearly expressive. Try reading with a bit more emphasis or emotion.\n"
+
+     filler_words = ["um", "uh", "like", "you know", "so", "actually", "basically", "literally"]
+     lowered = transcript.lower()
+     words = lowered.split()
+     total_words = len(words)
+     # Count single-word fillers per token; multi-word ones ("you know") by substring
+     filler_count = sum(lowered.count(fw) if " " in fw else words.count(fw) for fw in filler_words)
+     filler_ratio = filler_count / total_words if total_words > 0 else 0
+
+     report += "\n### 💬 Pausing Style:\n"
+     report += f"- {filler_count} fillers out of {total_words} words.\n"
+     if filler_ratio > 0.06:
+         report += "- Try pausing instead of fillers.\n"
+     elif filler_ratio > 0.03:
+         report += "- A few fillers - consider tightening up delivery.\n"
+     else:
+         report += "- Strong fluency - great control.\n"
+
+     report += "\n### 🧭 Next Moves:\n"
+     report += generate_next_moves(dominant_emotion, conf_score, transcript)
+     return report
+
+ def analyze_audio(audio_path):
+     result = whisper_model.transcribe(audio_path)
+     transcript = result['text']
+     emotion_results = emotion_classifier(audio_path)
+     labels = [r['label'] for r in emotion_results]
+     scores = [r['score'] for r in emotion_results]
+     fig = create_emotion_chart(labels, scores)
+     report = generate_personacoach_report(emotion_results, transcript)
+     return transcript, fig, report
+
+ interface = gr.Interface(
+     fn=analyze_audio,
+     inputs=gr.Audio(type="filepath", label="Upload your voice (.wav only)"),
+     outputs=[
+         gr.Textbox(label="📝 Transcription"),
+         gr.Plot(label="🎭 Emotion Chart"),
+         gr.Markdown(label="📄 PersonaCoach Feedback")
+     ],
+     title="SPEAK – Speech Performance Evaluation and Affective Knowledge",
+     description="Upload a voice sample and get coaching feedback on tone, emotion, and fluency."
+ )
+
+ interface.launch()
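
For a quick sanity check of the emotion model outside the Gradio UI, the same checkpoint can be called directly. A minimal sketch, using the checkpoint app.py loads; "sample.wav" is a hypothetical path to a short speech clip you supply:

    from transformers import pipeline

    # Same checkpoint app.py loads. Returns a list of {"label", "score"} dicts
    # sorted by confidence, with short labels such as "hap", "sad", "neu", "ang".
    clf = pipeline("audio-classification", model="superb/hubert-large-superb-er")
    print(clf("sample.wav"))  # hypothetical local clip - replace with your own
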
requirements.txt ADDED
@@ -0,0 +1,7 @@
+
+ gradio
+ matplotlib
+ torch
+ transformers
+ librosa
+ openai-whisper
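
One caveat worth noting: openai-whisper decodes audio by shelling out to the ffmpeg binary, which pip does not install. A minimal preflight check, as a sketch:

    import shutil

    # Whisper invokes ffmpeg as a subprocess for audio decoding; confirm the
    # binary is on PATH (install it via your OS package manager, not pip).
    assert shutil.which("ffmpeg"), "ffmpeg not found on PATH"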