Benitha committed
Commit c2962bc · verified · 1 Parent(s): 6f45095

Upload app.py

Files changed (1)
  1. app.py +147 -0
app.py ADDED
@@ -0,0 +1,147 @@
import gradio as gr
from transformers import pipeline
import whisper
import matplotlib.pyplot as plt

# Load the two models once at startup: a speech-emotion classifier and Whisper for transcription
emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
whisper_model = whisper.load_model("base")
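
# Note (not in the original commit): the SUPERB ER checkpoint above is trained on
# IEMOCAP's four-class setup, so in practice it should only emit the labels
# 'neu', 'hap', 'ang', and 'sad'; the extra 'fea'/'dis'/'sur' entries in the maps
# below are harmless fallbacks.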
def create_emotion_chart(labels, scores):
    emoji_map = {
        "hap": "😊 Happy", "sad": "😔 Sad", "neu": "😐 Neutral",
        "ang": "😠 Angry", "fea": "😨 Fear", "dis": "🤢 Disgust", "sur": "😮 Surprise"
    }
    color_map = {
        "hap": "#facc15", "sad": "#60a5fa", "neu": "#a1a1aa",
        "ang": "#ef4444", "fea": "#818cf8", "dis": "#14b8a6", "sur": "#f472b6"
    }
    display_labels = [emoji_map.get(label, label) for label in labels]
    colors = [color_map.get(label, "#60a5fa") for label in labels]
    fig, ax = plt.subplots(figsize=(5, 3.5))
    bars = ax.barh(display_labels, scores, color=colors, edgecolor="black", height=0.5)
    for bar, score in zip(bars, scores):
        ax.text(bar.get_width() + 0.02, bar.get_y() + bar.get_height() / 2, f"{score:.2f}", va='center', fontsize=10)
    ax.set_xlim(0, 1)
    ax.set_title("🎭 Emotion Confidence Scores", fontsize=13, pad=10)
    ax.invert_yaxis()
    ax.set_facecolor("#f9fafb")
    fig.patch.set_facecolor("#f9fafb")
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.tick_params(axis='x', colors='gray')
    ax.tick_params(axis='y', colors='gray')
    return fig

def generate_next_moves(dominant_emotion, conf_score, transcript=""):
    # Heuristic coaching tips keyed off the dominant emotion and tone-confidence score
    suggestions = []
    harsh_words = ["bad", "ugly", "terrible", "hate", "worst"]
    positive_tone_negative_words = any(word in transcript.lower() for word in harsh_words) if "happiness" in dominant_emotion else False
    if 'sadness' in dominant_emotion:
        suggestions.append("Your tone feels low; try lifting the pitch slightly to bring more warmth.")
        suggestions.append("Even if the words are positive, a brighter tone helps convey enthusiasm.")
    elif 'happiness' in dominant_emotion and conf_score >= 80:
        suggestions.append("Nice energy! Try modulating your tone even more for emphasis in key moments.")
        suggestions.append("Experiment with subtle emotional shifts as you speak for more depth.")
    elif 'neutral' in dominant_emotion:
        suggestions.append("Add inflection to break a monotone pattern, especially at the ends of sentences.")
        suggestions.append("Highlight your message by stressing emotionally important words.")
    elif conf_score < 50:
        suggestions.append("Try exaggerating vocal ups and downs when reading to unlock more expression.")
        suggestions.append("Slow down slightly and stretch certain words to vary your delivery.")
    else:
        suggestions.append("Keep practicing tone variation; you're building a solid base.")
    if positive_tone_negative_words:
        suggestions.append("Your tone was upbeat, but the word choices were harsh; aim to align both for better impact.")
    return "\n- " + "\n- ".join(suggestions)

def generate_personacoach_report(emotions, transcript):
    report = "## 📝 **Your PersonaCoach Report**\n---\n\n"
    report += "### 🗒️ **What You Said:**\n"
    report += f"> _{transcript.strip()}_\n\n"
    label_map = {
        'hap': '😊 happiness', 'sad': '😔 sadness', 'neu': '😐 neutral',
        'ang': '😠 anger', 'fea': '😨 fear', 'dis': '🤢 disgust', 'sur': '😮 surprise'
    }
    for e in emotions:
        e['emotion'] = label_map.get(e['label'], e['label'])
    scores = [s['score'] for s in emotions]
    top_score = max(scores)
    conf_score = int(top_score * 100)
    # Only emotions with at least 20% confidence are reported; the first (top-scoring) one drives the tips
    meaningful_emotions = [(e['emotion'], e['score']) for e in emotions if e['score'] >= 0.2]
    emotion_labels = [e[0] for e in meaningful_emotions]
    dominant_emotion = emotion_labels[0] if emotion_labels else "neutral"

    report += f"### 🎯 **Tone Strength:**\n- Your tone scored **{conf_score}/100** in clarity.\n\n"
    report += "### 🗣️ **Emotion & Delivery:**\n"
    if meaningful_emotions:
        emotions_str = ", ".join([f"**{label}** ({score:.2f})" for label, score in meaningful_emotions])
        report += f"- Emotionally, your voice showed: {emotions_str}\n"
    else:
        report += "- Your tone wasn't clearly expressive. Try reading with a bit more emphasis or emotion.\n"
    filler_words = ["um", "uh", "like", "you know", "so", "actually", "basically", "literally"]
    # Strip trailing punctuation so "um," still counts; match "you know" as a word pair,
    # since words.count() alone can never see a two-word filler
    words = [w.strip(".,!?;:") for w in transcript.lower().split()]
    total_words = len(words)
    filler_count = sum(words.count(fw) for fw in filler_words if " " not in fw)
    filler_count += sum(1 for i in range(len(words) - 1) if f"{words[i]} {words[i + 1]}" in filler_words)
    filler_ratio = filler_count / total_words if total_words > 0 else 0

    report += "\n### 💬 **Pausing Style (e.g., 'um', 'like', 'you know'):**\n"
    report += f"- You used **{filler_count}** hesitation phrases out of **{total_words}** words.\n"
    if filler_ratio > 0.06:
        report += "- Try pausing instead of using fillers; it builds stronger presence.\n"
    elif filler_ratio > 0.03:
        report += "- A few slipped in. Practice holding space with silence instead.\n"
    else:
        report += "- Great fluency; you stayed focused and controlled.\n"

    report += "\n### ✅ **What You're Doing Well:**\n"
    if top_score >= 0.75 and filler_ratio < 0.03:
        report += "- Confident tone and smooth delivery; keep it up!\n"
    else:
        report += "- You're on track. Keep refining tone and pacing.\n"

    report += "\n### 🧭 **Next Moves:**\n"
    report += generate_next_moves(dominant_emotion, conf_score, transcript) + "\n"
    return report
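
# Assumption about library behavior (not stated in the commit): transformers'
# audio-classification pipeline returns its predictions sorted by descending score,
# so the first result below is the dominant emotion the report keys its advice off.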
def analyze_audio(audio_path):
    # Full pipeline for one clip: transcribe, classify emotion, then build the chart and report
    result = whisper_model.transcribe(audio_path)
    transcript = result['text']
    emotion_results = emotion_classifier(audio_path)
    labels = [r['label'] for r in emotion_results]
    scores = [r['score'] for r in emotion_results]
    fig = create_emotion_chart(labels, scores)
    report = generate_personacoach_report(emotion_results, transcript)
    return transcript, fig, report

with gr.Blocks(title="SPEAK: PersonaCoach", theme=gr.themes.Soft()) as app:
    gr.Markdown("""
    <div style="text-align:center; margin-bottom: 1rem;">
        <h1 style="font-size: 2.2rem; margin-bottom: 0.2rem;">🎤 SPEAK: PersonaCoach</h1>
        <p style="color: gray;">Your smart voice reflection tool: assess tone, confidence, and delivery</p>
    </div>
    """, elem_id="header")

    with gr.Row():
        with gr.Column(scale=4):
            audio_input = gr.Audio(type="filepath", label="🎧 Upload Your Voice (.wav)", elem_id="upload-audio")
        with gr.Column(scale=1, min_width=120):
            analyze_btn = gr.Button("🔍 Analyze", size="sm", elem_id="analyze-btn")

    gr.Markdown("## 🧠 Results", elem_id="results-header")

    with gr.Row(equal_height=True):
        with gr.Column(scale=2):
            feedback_output = gr.Markdown(label="📄 PersonaCoach Feedback", elem_id="report-section")
        with gr.Column(scale=1):
            emotion_plot = gr.Plot(label="🎭 Emotion Chart", elem_id="chart")

    # The transcript is routed to a hidden Textbox; only the chart and report are displayed
    analyze_btn.click(
        fn=analyze_audio,
        inputs=audio_input,
        outputs=[gr.Textbox(visible=False), emotion_plot, feedback_output]
    )

app.launch()
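
For a quick local sanity check of the two model calls above, without going through the Gradio UI, a sketch along these lines should work. "sample.wav" is a placeholder path, the first run downloads both checkpoints, and Whisper needs ffmpeg installed to decode audio. The models are re-instantiated here because importing app.py would immediately call app.launch().

# Smoke-test sketch (not part of the commit): transcribe and classify one clip.
import whisper
from transformers import pipeline

emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
whisper_model = whisper.load_model("base")

result = whisper_model.transcribe("sample.wav")  # placeholder path
print(result["text"])
for r in emotion_classifier("sample.wav"):
    print(f"{r['label']}: {r['score']:.2f}")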