Benitha committed on
Commit 6f45095 · verified · 1 Parent(s): 94e402f

Delete app.py

Files changed (1)
  1. app.py +0 -136
app.py DELETED
@@ -1,136 +0,0 @@
-
-import gradio as gr
-from transformers import pipeline
-import whisper
-from collections import Counter
-import matplotlib.pyplot as plt
-
-# Load models
-emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
-whisper_model = whisper.load_model("base")
-
-# ==== Chart Logic ====
-def create_emotion_chart(labels, scores):
-    emoji_map = {
-        "hap": "😊 Happy", "sad": "😔 Sad", "neu": "😐 Neutral",
-        "ang": "😠 Angry", "fea": "😨 Fear", "dis": "🤢 Disgust", "sur": "😮 Surprise"
-    }
-    color_map = {
-        "hap": "#facc15", "sad": "#60a5fa", "neu": "#a1a1aa",
-        "ang": "#ef4444", "fea": "#818cf8", "dis": "#14b8a6", "sur": "#f472b6"
-    }
-    display_labels = [emoji_map.get(label, label) for label in labels]
-    colors = [color_map.get(label, "#60a5fa") for label in labels]
-    fig, ax = plt.subplots(figsize=(5, 3.5))
-    bars = ax.barh(display_labels, scores, color=colors, edgecolor="black", height=0.5)
-    for bar, score in zip(bars, scores):
-        ax.text(bar.get_width() + 0.02, bar.get_y() + bar.get_height() / 2, f"{score:.2f}", va='center', fontsize=10)
-    ax.set_xlim(0, 1)
-    ax.set_title("🎭 Emotion Confidence Scores", fontsize=13, pad=10)
-    ax.invert_yaxis()
-    ax.set_facecolor("#f9fafb")
-    fig.patch.set_facecolor("#f9fafb")
-    for spine in ax.spines.values():
-        spine.set_visible(False)
-    ax.tick_params(axis='x', colors='gray')
-    ax.tick_params(axis='y', colors='gray')
-    return fig
-
-# ==== Feedback Generation ====
-def generate_next_moves(dominant_emotion, conf_score, transcript=""):
-    suggestions = []
-    harsh_words = ["bad", "ugly", "terrible", "hate", "worst"]
-    positive_tone_negative_words = any(word in transcript.lower() for word in harsh_words) if "happiness" in dominant_emotion else False
-    if 'sadness' in dominant_emotion:
-        suggestions.append("Your tone feels low — try lifting the pitch slightly to bring more warmth.")
-        suggestions.append("Even if the words are positive, a brighter tone helps convey enthusiasm.")
-    elif 'happiness' in dominant_emotion and conf_score >= 80:
-        suggestions.append("Nice energy! Try modulating your tone even more for emphasis in key moments.")
-        suggestions.append("Experiment with subtle emotional shifts as you speak for more depth.")
-    elif 'neutral' in dominant_emotion:
-        suggestions.append("Add inflection to break a monotone pattern — especially at the ends of sentences.")
-        suggestions.append("Highlight your message by stressing emotionally important words.")
-    elif conf_score < 50:
-        suggestions.append("Try exaggerating vocal ups and downs when reading to unlock more expression.")
-        suggestions.append("Slow down slightly and stretch certain words to vary your delivery.")
-    else:
-        suggestions.append("Keep practicing tone variation — you’re building a solid base.")
-    if positive_tone_negative_words:
-        suggestions.append("Your tone was upbeat, but the word choices were harsh — aim to align both for better impact.")
-    return "\n- " + "\n- ".join(suggestions)
-
-def generate_personacoach_report(emotions, transcript):
-    report = "## 📝 **Your PersonaCoach Report**\n---\n\n"
-    report += "### 🗒️ **What You Said:**\n"
-    report += f"> _{transcript.strip()}_\n\n"
-    label_map = {
-        'hap': '😊 happiness', 'sad': '😔 sadness', 'neu': '😐 neutral',
-        'ang': '😠 anger', 'fea': '😨 fear', 'dis': '🤢 disgust', 'sur': '😮 surprise'
-    }
-    for e in emotions:
-        e['emotion'] = label_map.get(e['label'], e['label'])
-    scores = [s['score'] for s in emotions]
-    top_score = max(scores)
-    conf_score = int(top_score * 100)
-    meaningful_emotions = [(e['emotion'], e['score']) for e in emotions if e['score'] >= 0.2]
-    emotion_labels = [e[0] for e in meaningful_emotions]
-    dominant_emotion = emotion_labels[0] if emotion_labels else "neutral"
-
-    report += "### 🎯 **Tone Strength:**\n"
-    report += f"- Your tone scored **{conf_score}/100** in clarity.\n\n"
-    report += "### 🗣️ **Emotion & Delivery:**\n"
-    if meaningful_emotions:
-        emotions_str = ", ".join([f"**{label}** ({score:.2f})" for label, score in meaningful_emotions])
-        report += f"- Emotionally, your voice showed: {emotions_str}\n"
-    else:
-        report += "- Your tone wasn’t clearly expressive. Try reading with a bit more emphasis or emotion.\n"
-    filler_words = ["um", "uh", "like", "you know", "so", "actually", "basically", "literally"]
-    words = transcript.lower().split()
-    total_words = len(words)
-    filler_count = sum(words.count(fw) for fw in filler_words)
-    filler_ratio = filler_count / total_words if total_words > 0 else 0
-
-    report += "\n### 💬 **Pausing Style (e.g., 'um', 'like', 'you know'):**\n"
-    report += f"- You used **{filler_count}** hesitation phrases out of **{total_words}** words.\n"
-    if filler_ratio > 0.06:
-        report += "- Try pausing instead of using fillers — it builds stronger presence.\n"
-    elif filler_ratio > 0.03:
-        report += "- A few slipped in. Practice holding space with silence instead.\n"
-    else:
-        report += "- Great fluency — you stayed focused and controlled.\n"
-
-    report += "\n### ✅ **What You're Doing Well:**\n"
-    if top_score >= 0.75 and filler_ratio < 0.03:
-        report += "- Confident tone and smooth delivery — keep it up!\n"
-    else:
-        report += "- You’re on track. Keep refining tone and pacing.\n"
-
-    report += "\n### 🧭 **Next Moves:**\n"
-    report += generate_next_moves(dominant_emotion, conf_score, transcript) + "\n"
-    return report
-
-# ==== Main Interface Logic ====
-def analyze_audio(audio_path):
-    result = whisper_model.transcribe(audio_path)
-    transcript = result['text']
-    emotion_results = emotion_classifier(audio_path)
-    labels = [r['label'] for r in emotion_results]
-    scores = [r['score'] for r in emotion_results]
-    fig = create_emotion_chart(labels, scores)
-    report = generate_personacoach_report(emotion_results, transcript)
-    return transcript, fig, report
-
-# ==== Gradio UI ====
-interface = gr.Interface(
-    fn=analyze_audio,
-    inputs=gr.Audio(type="filepath", label="🎧 Upload Voice"),
-    outputs=[
-        gr.Textbox(label="📝 Transcription"),
-        gr.Plot(label="🎭 Emotion Chart"),
-        gr.Markdown(label="📄 PersonaCoach Feedback")
-    ],
-    title="SPEAK – Speech Performance Evaluation and Affective Knowledge",
-    description="Upload your voice and receive a tone-based coaching report powered by HuBERT + Whisper."
-)
-
-interface.launch()
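
For anyone restoring this file, a minimal sketch of the two model calls it depends on, assuming the transformers, torch, and openai-whisper packages are installed; "sample.wav" is a hypothetical test file, not part of the commit:

    # Minimal sketch, assuming transformers, torch, and openai-whisper are
    # installed; "sample.wav" is a hypothetical local audio file.
    from transformers import pipeline
    import whisper

    emotion_classifier = pipeline("audio-classification", model="superb/hubert-large-superb-er")
    whisper_model = whisper.load_model("base")

    # transcribe() returns a dict whose "text" field holds the transcript
    transcript = whisper_model.transcribe("sample.wav")["text"]
    # The classifier returns a list of {"label", "score"} dicts sorted by
    # score, highest first, which the report logic above relies on
    emotions = emotion_classifier("sample.wav")
    print(transcript)
    print(emotions)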