MaroofTechSorcerer committed on
Commit
48302e7
·
verified ·
1 Parent(s): 05470ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -10
app.py CHANGED
@@ -1,4 +1,3 @@
1
-
2
  import os
3
  import streamlit as st
4
  import tempfile
@@ -42,6 +41,9 @@ def get_emotion_classifier():
42
 
43
  def perform_emotion_detection(text):
44
  try:
 
 
 
45
  emotion_classifier = get_emotion_classifier()
46
  emotion_results = emotion_classifier(text)[0]
47
 
@@ -61,20 +63,60 @@ def perform_emotion_detection(text):
61
  "embarrassment", "fear", "grief", "nervousness", "remorse", "sadness"]
62
  neutral_emotions = ["confusion", "curiosity", "realization", "surprise", "neutral"]
63
 
64
- emotions_dict = {result['label']: result['score'] for result in emotion_results}
65
- top_emotion = max(emotions_dict, key=emotions_dict.get)
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
 
67
  if top_emotion in positive_emotions:
68
  sentiment = "POSITIVE"
69
  elif top_emotion in negative_emotions:
70
  sentiment = "NEGATIVE"
71
  else:
72
- sentiment = "NEUTRAL"
 
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  return emotions_dict, top_emotion, emotion_map, sentiment
75
  except Exception as e:
76
  st.error(f"Emotion detection failed: {str(e)}")
77
- return {}, "unknown", {}, "UNKNOWN"
 
78
 
79
  # Sarcasm Detection Function
80
  @st.cache_resource
@@ -86,6 +128,9 @@ def get_sarcasm_classifier():
86
 
87
  def perform_sarcasm_detection(text):
88
  try:
 
 
 
89
  sarcasm_classifier = get_sarcasm_classifier()
90
  result = sarcasm_classifier(text)[0]
91
  is_sarcastic = result['label'] == "LABEL_1"
@@ -254,7 +299,6 @@ def custom_audio_recorder():
254
  audioRecorder.streamBeingCaptured = null;
255
  }
256
  }
257
-
258
  var isRecording = false;
259
  var recordButton = document.getElementById('record-button');
260
  var audioElement = document.getElementById('audio-playback');
@@ -292,7 +336,6 @@ def custom_audio_recorder():
292
  });
293
  }
294
  }
295
-
296
  document.addEventListener('DOMContentLoaded', function() {
297
  recordButton = document.getElementById('record-button');
298
  audioElement = document.getElementById('audio-playback');
@@ -301,13 +344,11 @@ def custom_audio_recorder():
301
  recordButton.addEventListener('click', toggleRecording);
302
  });
303
  </script>
304
-
305
  <div class="audio-recorder-container">
306
  <button id="record-button" class="record-button">Start Recording</button>
307
  <audio id="audio-playback" controls style="display:block; margin-top:10px;"></audio>
308
  <input type="hidden" id="audio-data" name="audio-data">
309
  </div>
310
-
311
  <style>
312
  .audio-recorder-container {
313
  display: flex;
@@ -340,8 +381,16 @@ def custom_audio_recorder():
340
 
341
  # Function to display analysis results
342
  def display_analysis_results(transcribed_text):
 
 
 
 
343
  emotions_dict, top_emotion, emotion_map, sentiment = perform_emotion_detection(transcribed_text)
344
  is_sarcastic, sarcasm_score = perform_sarcasm_detection(transcribed_text)
 
 
 
 
345
 
346
  st.header("Transcribed Text")
347
  st.text_area("Text", transcribed_text, height=150, disabled=True, help="The audio converted to text.")
@@ -380,6 +429,17 @@ def display_analysis_results(transcribed_text):
380
  else:
381
  st.write("No emotions detected.")
382
 
 
 
 
 
 
 
 
 
 
 
 
383
  with st.expander("Analysis Details", expanded=False):
384
  st.write("""
385
  **How this works:**
@@ -416,6 +476,10 @@ def process_base64_audio(base64_data):
416
 
417
  # Main App Logic
418
  def main():
 
 
 
 
419
  tab1, tab2 = st.tabs(["πŸ“ Upload Audio", "πŸŽ™οΈ Record Audio"])
420
 
421
  with tab1:
@@ -487,4 +551,4 @@ def main():
487
  show_model_info()
488
 
489
  if __name__ == "__main__":
490
- main()
 
 
1
  import os
2
  import streamlit as st
3
  import tempfile
 
41
 
42
  def perform_emotion_detection(text):
43
  try:
44
+ if not text or len(text.strip()) < 3:
45
+ return {}, "neutral", {}, "NEUTRAL"
46
+
47
  emotion_classifier = get_emotion_classifier()
48
  emotion_results = emotion_classifier(text)[0]
49
 
 
63
  "embarrassment", "fear", "grief", "nervousness", "remorse", "sadness"]
64
  neutral_emotions = ["confusion", "curiosity", "realization", "surprise", "neutral"]
65
 
66
+ # Fix 1: Create a clean emotions dictionary from results
67
+ emotions_dict = {}
68
+ for result in emotion_results:
69
+ emotions_dict[result['label']] = result['score']
70
+
71
+ # Fix 2: Filter out very low scores (below threshold)
72
+ filtered_emotions = {k: v for k, v in emotions_dict.items() if v > 0.05}
73
+
74
+ # If filtered dictionary is empty, fall back to original
75
+ if not filtered_emotions:
76
+ filtered_emotions = emotions_dict
77
+
78
+ # Fix 3: Make sure we properly find the top emotion
79
+ top_emotion = max(filtered_emotions, key=filtered_emotions.get)
80
+ top_score = filtered_emotions[top_emotion]
81
 
82
+ # Fix 4: More robust sentiment assignment
83
  if top_emotion in positive_emotions:
84
  sentiment = "POSITIVE"
85
  elif top_emotion in negative_emotions:
86
  sentiment = "NEGATIVE"
87
  else:
88
+ # If the top emotion is neutral but there are strong competing emotions, use them
89
+ competing_emotions = sorted(filtered_emotions.items(), key=lambda x: x[1], reverse=True)[:3]
90
 
91
+ # Check if there's a close second non-neutral emotion
92
+ if len(competing_emotions) > 1:
93
+ if (competing_emotions[0][0] in neutral_emotions and
94
+ competing_emotions[1][0] not in neutral_emotions and
95
+ competing_emotions[1][1] > 0.7 * competing_emotions[0][1]):
96
+ # Use the second strongest emotion instead
97
+ top_emotion = competing_emotions[1][0]
98
+ if top_emotion in positive_emotions:
99
+ sentiment = "POSITIVE"
100
+ elif top_emotion in negative_emotions:
101
+ sentiment = "NEGATIVE"
102
+ else:
103
+ sentiment = "NEUTRAL"
104
+ else:
105
+ sentiment = "NEUTRAL"
106
+ else:
107
+ sentiment = "NEUTRAL"
108
+
109
+ # Log for debugging
110
+ print(f"Text: {text[:50]}...")
111
+ print(f"Top 3 emotions: {sorted(filtered_emotions.items(), key=lambda x: x[1], reverse=True)[:3]}")
112
+ print(f"Selected top emotion: {top_emotion} ({filtered_emotions.get(top_emotion, 0):.3f})")
113
+ print(f"Sentiment determined: {sentiment}")
114
+
115
  return emotions_dict, top_emotion, emotion_map, sentiment
116
  except Exception as e:
117
  st.error(f"Emotion detection failed: {str(e)}")
118
+ print(f"Exception in emotion detection: {str(e)}")
119
+ return {}, "neutral", {}, "NEUTRAL"
120
 
121
  # Sarcasm Detection Function
122
  @st.cache_resource
 
128
 
129
  def perform_sarcasm_detection(text):
130
  try:
131
+ if not text or len(text.strip()) < 3:
132
+ return False, 0.0
133
+
134
  sarcasm_classifier = get_sarcasm_classifier()
135
  result = sarcasm_classifier(text)[0]
136
  is_sarcastic = result['label'] == "LABEL_1"
 
299
  audioRecorder.streamBeingCaptured = null;
300
  }
301
  }
 
302
  var isRecording = false;
303
  var recordButton = document.getElementById('record-button');
304
  var audioElement = document.getElementById('audio-playback');
 
336
  });
337
  }
338
  }
 
339
  document.addEventListener('DOMContentLoaded', function() {
340
  recordButton = document.getElementById('record-button');
341
  audioElement = document.getElementById('audio-playback');
 
344
  recordButton.addEventListener('click', toggleRecording);
345
  });
346
  </script>
 
347
  <div class="audio-recorder-container">
348
  <button id="record-button" class="record-button">Start Recording</button>
349
  <audio id="audio-playback" controls style="display:block; margin-top:10px;"></audio>
350
  <input type="hidden" id="audio-data" name="audio-data">
351
  </div>
 
352
  <style>
353
  .audio-recorder-container {
354
  display: flex;
 
381
 
382
  # Function to display analysis results
383
  def display_analysis_results(transcribed_text):
384
+ # Fix 5: Add debugging to track what's happening
385
+ st.session_state.debug_info = st.session_state.get('debug_info', [])
386
+ st.session_state.debug_info.append(f"Processing text: {transcribed_text[:50]}...")
387
+
388
  emotions_dict, top_emotion, emotion_map, sentiment = perform_emotion_detection(transcribed_text)
389
  is_sarcastic, sarcasm_score = perform_sarcasm_detection(transcribed_text)
390
+
391
+ # Add results to debug info
392
+ st.session_state.debug_info.append(f"Top emotion: {top_emotion}, Sentiment: {sentiment}")
393
+ st.session_state.debug_info.append(f"Sarcasm: {is_sarcastic}, Score: {sarcasm_score:.3f}")
394
 
395
  st.header("Transcribed Text")
396
  st.text_area("Text", transcribed_text, height=150, disabled=True, help="The audio converted to text.")
 
429
  else:
430
  st.write("No emotions detected.")
431
 
432
+ # Fix 6: Add debug expander for troubleshooting
433
+ with st.expander("Debug Information", expanded=False):
434
+ st.write("Debugging information for troubleshooting:")
435
+ for i, debug_line in enumerate(st.session_state.debug_info[-10:]):
436
+ st.text(f"{i+1}. {debug_line}")
437
+ if emotions_dict:
438
+ st.write("Raw emotion scores:")
439
+ for emotion, score in sorted(emotions_dict.items(), key=lambda x: x[1], reverse=True):
440
+ if score > 0.01: # Only show non-negligible scores
441
+ st.text(f"{emotion}: {score:.4f}")
442
+
443
  with st.expander("Analysis Details", expanded=False):
444
  st.write("""
445
  **How this works:**
 
476
 
477
  # Main App Logic
478
  def main():
479
+ # Fix 7: Initialize session state for debugging
480
+ if 'debug_info' not in st.session_state:
481
+ st.session_state.debug_info = []
482
+
483
  tab1, tab2 = st.tabs(["πŸ“ Upload Audio", "πŸŽ™οΈ Record Audio"])
484
 
485
  with tab1:
 
551
  show_model_info()
552
 
553
  if __name__ == "__main__":
554
+ main()