MaroofTechSorcerer committed on
Commit f9389b4 · verified · 1 Parent(s): 521070e

Update app.py

Files changed (1)
  1. app.py +23 -14
app.py CHANGED
@@ -34,16 +34,25 @@ st.write("Analyze all 27 emotions from uploaded audio with enhanced detection to
 # Audio Preprocessing
 def make_audio_scarier(audio_path, output_path):
     try:
-        commands = [
-            f"ffmpeg -i {audio_path} -af 'asetrate=44100*0.8,aresample=44100' temp1.wav",
-            f"ffmpeg -i temp1.wav -af 'reverb=0.8:0.2:0.5:0.5:0.5:0.5' temp2.wav",
-            f"ffmpeg -i temp2.wav -af 'atempo=1.2' {output_path}"
-        ]
-        for cmd in commands:
-            subprocess.run(cmd, shell=True, check=True)
+        # Step 1: Adjust pitch (slower rate for scarier effect)
+        cmd1 = f"ffmpeg -i {audio_path} -af 'asetrate=44100*0.8,aresample=44100' temp1.wav"
+        subprocess.run(cmd1, shell=True, check=True, stderr=subprocess.PIPE, text=True)
+
+        # Step 2: Apply reverb with adjusted parameters
+        cmd2 = f"ffmpeg -i temp1.wav -af 'reverb=0.4:0.7:0.5:0.5:0.5:0.02' temp2.wav"
+        subprocess.run(cmd2, shell=True, check=True, stderr=subprocess.PIPE, text=True)
+
+        # Step 3: Adjust tempo
+        cmd3 = f"ffmpeg -i temp2.wav -af 'atempo=1.2' {output_path}"
+        subprocess.run(cmd3, shell=True, check=True, stderr=subprocess.PIPE, text=True)
+
+        # Clean up temporary files
         for temp_file in ["temp1.wav", "temp2.wav"]:
             if os.path.exists(temp_file):
                 os.remove(temp_file)
+    except subprocess.CalledProcessError as e:
+        st.error(f"Audio processing failed: {str(e)} - Command: {e.cmd}, Output: {e.stderr}")
+        raise
     except Exception as e:
         st.error(f"Audio processing failed: {str(e)}")
         raise
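The refactor replaces the single command list with three separately checked subprocess calls and captures stderr, so an ffmpeg failure now surfaces its command and output in the Streamlit error instead of failing partway through the loop. One caveat: asetrate/aresample and atempo are standard ffmpeg filters, but stock ffmpeg builds do not ship a filter named reverb (it is a SoX effect), so the second step is the most likely source of the new CalledProcessError. A minimal sketch of the same pipeline, assuming list-style arguments instead of shell=True and substituting aecho as an illustrative reverb-like filter:

```python
# Sketch only: argument lists avoid shell quoting issues with paths containing spaces.
# aecho is an assumption standing in for the 'reverb' filter, which stock ffmpeg lacks.
import subprocess

def make_audio_scarier_sketch(audio_path: str, output_path: str) -> None:
    steps = [
        # Step 1: lower pitch by playing samples at 80% rate, then resample back to 44.1 kHz
        (audio_path, "temp1.wav", "asetrate=44100*0.8,aresample=44100"),
        # Step 2: echo/reverb-like tail (aecho=in_gain:out_gain:delays:decays)
        ("temp1.wav", "temp2.wav", "aecho=0.8:0.7:60:0.4"),
        # Step 3: bring the tempo back up without changing pitch
        ("temp2.wav", output_path, "atempo=1.2"),
    ]
    for src, dst, audio_filter in steps:
        subprocess.run(
            ["ffmpeg", "-y", "-i", src, "-af", audio_filter, dst],
            check=True, capture_output=True, text=True,
        )
```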
@@ -83,9 +92,9 @@ def perform_audio_emotion_detection(audio_path):
         # Enhanced boosting based on audio features
         features = extract_audio_features(audio_path)
         if features.get("pitch_mean", 0) < 200 and features.get("energy_mean", 0) > 0.1 and features.get("zcr_mean", 0) > 0.1:
-            emotion_dict["fearful"] = min(1.0, emotion_dict.get("fearful", 0) + 0.4) # Increased boost
+            emotion_dict["fearful"] = min(1.0, emotion_dict.get("fearful", 0) + 0.4)
             top_emotion = "fearful" if emotion_dict["fearful"] > emotion_dict[top_emotion] else top_emotion
-        elif features.get("energy_mean", 0) > 0.25: # Stricter threshold
+        elif features.get("energy_mean", 0) > 0.25:
             emotion_dict["angry"] = min(1.0, emotion_dict.get("angry", 0) + 0.35)
             top_emotion = "angry" if emotion_dict["angry"] > emotion_dict[top_emotion] else top_emotion
         elif features.get("pitch_mean", 0) > 500 and features.get("energy_mean", 0) < 0.05:
@@ -99,7 +108,7 @@ def perform_audio_emotion_detection(audio_path):
             top_emotion = "surprise" if emotion_dict["surprise"] > emotion_dict[top_emotion] else top_emotion
         # Fallback to avoid neutral if score is low
         if emotion_dict["neutral"] > 0.5 and max([v for k, v in emotion_dict.items() if k != "neutral"]) > 0.3:
-            emotion_dict["neutral"] = max(0.0, emotion_dict["neutral"] - 0.2) # Reduce neutral weight
+            emotion_dict["neutral"] = max(0.0, emotion_dict["neutral"] - 0.2)
             top_emotion = max(emotion_dict, key=emotion_dict.get)
         return emotion_dict, top_emotion
     except Exception as e:
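The neutral-dampening rule above reappears in the text path and in the combined scores later in this commit, each time with slightly different thresholds (rival floor 0.3 vs 0.4, penalty 0.2 / 0.15 / 0.25). A small helper could keep the three call sites in sync; this is a sketch, not code from app.py:

```python
# Sketch only (not code from app.py): the same neutral-dampening rule could be factored
# into one helper shared by the audio, text, and combined paths.
def dampen_neutral(scores, neutral_floor=0.5, rival_floor=0.3, penalty=0.2):
    rivals = [v for k, v in scores.items() if k != "neutral"]
    if scores.get("neutral", 0) > neutral_floor and rivals and max(rivals) > rival_floor:
        scores["neutral"] = max(0.0, scores["neutral"] - penalty)
    return scores, max(scores, key=scores.get)

# Example call, matching the audio branch's thresholds in this hunk:
# emotion_dict, top_emotion = dampen_neutral(emotion_dict, rival_floor=0.3, penalty=0.2)
```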
@@ -124,7 +133,6 @@ def perform_text_emotion_detection(text):
                     "pride", "realization", "relief", "remorse", "sadness", "surprise", "neutral"]
         emotions_dict = {result['label']: result['score'] for result in results if result['label'] in emotions}
         top_emotion = max(emotions_dict, key=emotions_dict.get)
-        # Reduce neutral influence if other emotions are strong
         if emotions_dict.get("neutral", 0) > 0.5 and max([v for k, v in emotions_dict.items() if k != "neutral"]) > 0.4:
             emotions_dict["neutral"] = max(0.0, emotions_dict["neutral"] - 0.15)
             top_emotion = max(emotions_dict, key=emotions_dict.get)
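The label list filtered here is the GoEmotions taxonomy: 27 emotion categories plus neutral, which is where the app's "27 emotions" framing comes from. This hunk only drops an inline comment, but for context, scores of this shape typically come from a text-classification pipeline run with top_k=None; the model name below is an illustrative assumption, not necessarily the one app.py loads:

```python
# Sketch: producing a {label: score} dict over all GoEmotions labels.
# "SamLowe/roberta-base-go_emotions" is an assumed example model, not confirmed by this diff.
from transformers import pipeline

classifier = pipeline("text-classification",
                      model="SamLowe/roberta-base-go_emotions",
                      top_k=None)  # return scores for every label, not just the best one

results = classifier(["I can't believe this actually worked!"])[0]
emotions_dict = {r["label"]: r["score"] for r in results}
top_emotion = max(emotions_dict, key=emotions_dict.get)
```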
@@ -237,7 +245,7 @@ def display_analysis_results(audio_path):
         # Avoid neutral if other emotions are competitive
         top_emotion = max(combined_emotions, key=combined_emotions.get)
         if combined_emotions["neutral"] > 0.5 and max([v for k, v in combined_emotions.items() if k != "neutral"]) > 0.4:
-            combined_emotions["neutral"] = max(0.0, combined_emotions["neutral"] - 0.25) # Stronger reduction
+            combined_emotions["neutral"] = max(0.0, combined_emotions["neutral"] - 0.25)
             top_emotion = max(combined_emotions, key=combined_emotions.get)
 
         sentiment = "POSITIVE" if top_emotion in ["admiration", "amusement", "approval", "caring", "desire", "excitement",
@@ -254,7 +262,7 @@ def display_analysis_results(audio_path):
             st.markdown(f"**{sentiment_icon} {sentiment.capitalize()}** (Based on {top_emotion})")
             st.subheader("Sarcasm")
             sarcasm_icon = "😏" if is_sarcastic else "😐"
-            st.markdown(f"**{sarcasm_icon} {'Detected' if is_sarcastic else 'Not Detected'}** (Score: {sarcasm_score:.3f})")
+            st.markdown(f"**{sarcasm_icon} {'Detected' if is_sarcastic else 'Not Detected'}** (Score: {sarcasm_score:.3f})")
 
         with col2:
             st.subheader("Emotion Distribution")
@@ -304,4 +312,5 @@ def main():
     """)
 
 if __name__ == "__main__":
-    main()
+    main()
+
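With ffmpeg available on PATH (the preprocessing shells out to it), the updated app can be exercised locally with streamlit run app.py before committing further changes.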