Update app.py
app.py CHANGED
@@ -158,7 +158,7 @@ def process_audio(audio_path):
             create_plot(empty, "🎧 Audio Analysis"),
             "No audio detected",
             create_plot(empty, "π Text Analysis"),
-            create_plot(empty, "π€
+            create_plot(empty, "π€ Audio-Text Analysis"),
             "π Please provide audio input"
         )

@@ -182,7 +182,7 @@ def process_audio(audio_path):
             create_plot(audio_df, "🎧 Audio Analysis"),
             f"🗣️ Transcription:\n{text}",
             create_plot(text_df, "π Text Analysis"),
-            create_plot(combined_df, "π€
+            create_plot(combined_df, "π€ Audio-Text Analysis"),
             f"## π Dominant Emotion: {top_emotion}"
         )

@@ -193,7 +193,7 @@ def process_audio(audio_path):
             create_plot(error_df, "🎧 Audio Analysis"),
             "❌ Error processing audio",
             create_plot(error_df, "π Text Analysis"),
-            create_plot(error_df, "π€
+            create_plot(error_df, "π€ Audio-Text Analysis"),
             "⚠️ Processing Error"
         )

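Note on the hunks above: all three branches of process_audio now return the same five outputs (audio plot, transcription text, text plot, the new audio-text plot, and the dominant-emotion markdown), built through a create_plot helper that is not part of this diff. As a rough sketch only, assuming the audio_df / text_df / combined_df frames carry "emotion" and "score" columns (the column names are assumptions, not taken from app.py), such a helper could look like this:

import matplotlib.pyplot as plt
import pandas as pd

def create_plot(df: pd.DataFrame, title: str):
    """Hypothetical sketch of the create_plot helper called above.

    Draws a bar chart of per-emotion scores; gr.Plot can render the returned Figure.
    """
    fig, ax = plt.subplots(figsize=(5, 3))
    if df is not None and not df.empty:
        ax.bar(df["emotion"], df["score"])
        ax.set_ylim(0, 1)
        ax.set_ylabel("Score")
    ax.set_title(title)
    fig.tight_layout()
    return fig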
@@ -202,7 +202,7 @@ def create_app():
     """Build enhanced Gradio interface."""
     with gr.Blocks(theme=gr.themes.Soft(), title="Emotion Detection from Speech") as demo:
         gr.Markdown("# Intelligent system for Bilingual Bimodal Emotion Recognition (BiBiER)")
-        gr.Markdown("Analyze emotions in speech through both audio characteristics and spoken content")
+        gr.Markdown("Analyze emotions in Russian and English speech through both audio characteristics and spoken content")

         with gr.Row():
             audio_input = gr.Audio(
@@ -214,7 +214,7 @@ def create_app():
             )

         with gr.Row():
-            top_emotion = gr.Markdown("## π Dominant Emotion: Waiting for input...",
+            top_emotion = gr.Markdown("## π Dominant Emotion: Waiting for input ...",
                                       elem_classes="dominant-emotion")

         with gr.Row():
@@ -223,7 +223,7 @@ def create_app():
             with gr.Column():
                 text_plot = gr.Plot(label="Text Analysis")
             with gr.Column():
-                combined_plot = gr.Plot(label="
+                combined_plot = gr.Plot(label="Audio-Text Analysis")

             transcription = gr.Textbox(
                 label="π Transcription Results",
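The event wiring that feeds these components sits outside the changed lines shown here. Assuming process_audio returns the five values in the order visible above, and assuming an audio_plot component exists alongside text_plot and combined_plot (it does not appear in these hunks), the hookup inside create_app might look roughly like this sketch:

import gradio as gr

with gr.Blocks() as demo:
    audio_input = gr.Audio(type="filepath", label="Audio input")  # settings assumed
    audio_plot = gr.Plot(label="Audio Analysis")                  # assumed component
    transcription = gr.Textbox(label="Transcription Results")
    text_plot = gr.Plot(label="Text Analysis")
    combined_plot = gr.Plot(label="Audio-Text Analysis")
    top_emotion = gr.Markdown("## Dominant Emotion: Waiting for input ...")

    # Output order must match the tuple returned by process_audio in app.py.
    audio_input.change(
        fn=process_audio,
        inputs=audio_input,
        outputs=[audio_plot, transcription, text_plot, combined_plot, top_emotion],
    )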
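Finally, combined_df itself (the input to the new "Audio-Text Analysis" plot) is not constructed in this diff. A common way to fuse the two modalities is a weighted average of the per-emotion scores; the following is only an illustration of that idea under the same assumed column names, not the fusion actually used in app.py:

import pandas as pd

def combine_scores(audio_df: pd.DataFrame, text_df: pd.DataFrame,
                   audio_weight: float = 0.5) -> pd.DataFrame:
    """Illustrative late fusion: weighted average of audio and text emotion scores."""
    merged = audio_df.merge(text_df, on="emotion", suffixes=("_audio", "_text"))
    merged["score"] = (audio_weight * merged["score_audio"]
                       + (1.0 - audio_weight) * merged["score_text"])
    return merged[["emotion", "score"]].sort_values("score", ascending=False)

With frames shaped like that, the dominant emotion shown in the header would simply be the top row of the fused frame (combined_df.iloc[0]["emotion"]).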