Diggz10 commited on
Commit
ccaa441
·
verified ·
1 Parent(s): 8f88515

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -22
app.py CHANGED
@@ -3,7 +3,6 @@ from transformers import pipeline
3
  import soundfile as sf
4
  import os
5
 
6
- # --- Model Loading ---
7
  try:
8
  classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
9
  except Exception as e:
@@ -11,36 +10,23 @@ except Exception as e:
11
  return {"error": f"Failed to load the model. Please check the logs. Error: {str(e)}"}
12
  classifier = None
13
 
14
- # --- Prediction Function ---
15
  def predict_emotion(audio_file):
16
- if classifier is None:
17
- return {"error": "The AI model could not be loaded."}
18
- if audio_file is None:
19
- return {"error": "No audio input provided."}
20
-
21
- if isinstance(audio_file, str):
22
- audio_path = audio_file
23
  elif isinstance(audio_file, tuple):
24
  sample_rate, audio_array = audio_file
25
  temp_audio_path = "temp_audio_from_mic.wav"
26
  sf.write(temp_audio_path, audio_array, sample_rate)
27
  audio_path = temp_audio_path
28
- else:
29
- return {"error": f"Invalid audio input format: {type(audio_file)}"}
30
-
31
  try:
32
  results = classifier(audio_path, top_k=5)
33
- emotion_scores = {item['label']: round(item['score'], 3) for item in results}
34
- return emotion_scores
35
- except Exception as e:
36
- return {"error": f"An error occurred during prediction: {str(e)}"}
37
  finally:
38
- if 'temp_audio_path' in locals() and os.path.exists(temp_audio_path):
39
- os.remove(temp_audio_path)
40
-
41
 
42
- # --- Gradio Interface ---
43
- # We have REMOVED the api_name parameter to revert to the default endpoint.
44
  iface = gr.Interface(
45
  fn=predict_emotion,
46
  inputs=gr.Audio(sources=["microphone", "upload"], type="filepath", label="Upload Audio or Record with Microphone"),
@@ -49,6 +35,5 @@ iface = gr.Interface(
49
  description="Upload an audio file or record your voice to detect emotions.",
50
  )
51
 
52
- # Launch the Gradio app with the API queue enabled
53
  if __name__ == "__main__":
54
  iface.queue().launch()
 
3
  import soundfile as sf
4
  import os
5
 
 
6
# --- Model Loading ---
# Load the speech-emotion-recognition pipeline once at import time so every
# request reuses the same model instance.
try:
    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
except Exception as e:
    # BUG FIX: the original had a module-level `return {...}` here, which is a
    # SyntaxError ('return' outside function) and made the fallback assignment
    # unreachable. Report the failure and fall back to None; predict_emotion
    # checks for None and surfaces the error to the user.
    print(f"Failed to load the model. Please check the logs. Error: {e}")
    classifier = None
12
 
 
13
def predict_emotion(audio_file):
    """Classify the emotion expressed in an audio clip.

    Parameters
    ----------
    audio_file : str | tuple | None
        Either a filesystem path to an audio file (Gradio ``type="filepath"``)
        or a ``(sample_rate, samples)`` tuple from the microphone component.

    Returns
    -------
    dict
        Mapping of emotion label to score rounded to 3 decimals, or an
        ``{"error": message}`` dict on any failure.
    """
    if classifier is None:
        return {"error": "The AI model could not be loaded."}
    if audio_file is None:
        return {"error": "No audio input provided."}

    # Sentinel: set only when we write a temp file ourselves, so cleanup in
    # `finally` is explicit. (Replaces the fragile `'temp_audio_path' in
    # locals()` introspection check from the original.)
    temp_audio_path = None

    if isinstance(audio_file, str):
        audio_path = audio_file
    elif isinstance(audio_file, tuple):
        # Microphone input arrives as (sample_rate, numpy array); persist it
        # to disk so the pipeline can read it as a file.
        sample_rate, audio_array = audio_file
        temp_audio_path = "temp_audio_from_mic.wav"
        sf.write(temp_audio_path, audio_array, sample_rate)
        audio_path = temp_audio_path
    else:
        return {"error": f"Invalid audio input format: {type(audio_file)}"}

    try:
        results = classifier(audio_path, top_k=5)
        return {item['label']: round(item['score'], 3) for item in results}
    except Exception as e:
        return {"error": f"An error occurred during prediction: {str(e)}"}
    finally:
        # Remove the temp file only if this call created one.
        if temp_audio_path is not None and os.path.exists(temp_audio_path):
            os.remove(temp_audio_path)
 
 
29
 
 
 
30
  iface = gr.Interface(
31
  fn=predict_emotion,
32
  inputs=gr.Audio(sources=["microphone", "upload"], type="filepath", label="Upload Audio or Record with Microphone"),
 
35
  description="Upload an audio file or record your voice to detect emotions.",
36
  )
37
 
 
38
# Entry point: enable Gradio's request queue, then launch the web app.
if __name__ == "__main__":
    iface.queue().launch()