yunusajib committed on
Commit
3baa918
·
verified ·
1 Parent(s): 5eff629

update code

Browse files
Files changed (1) hide show
  1. app.py +69 -4
app.py CHANGED
@@ -10,7 +10,6 @@ import onnxruntime as ort
10
  import requests
11
  import os
12
  from sklearn.preprocessing import StandardScaler
13
- import joblib
14
 
15
  # Download emotion recognition ONNX model
16
  MODEL_URL = "https://github.com/onnx/models/raw/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.onnx"
@@ -26,7 +25,7 @@ if not os.path.exists(MODEL_PATH):
26
  emotion_session = ort.InferenceSession(MODEL_PATH)
27
  emotion_labels = ['neutral', 'happy', 'surprise', 'sad', 'angry', 'disgust', 'fear', 'contempt']
28
 
29
- # Simple voice emotion classifier (replace with your own trained model if needed)
30
  class VoiceEmotionClassifier:
31
  def __init__(self):
32
  self.scaler = StandardScaler()
@@ -54,7 +53,6 @@ class VoiceEmotionClassifier:
54
  features = self.scaler.transform(features)
55
 
56
  # Simple rule-based classifier (replace with actual trained model)
57
- # This is just a placeholder - you should train a proper model
58
  if features[0, 0] > 0.5:
59
  return "happy", [{"label": "happy", "score": 0.8}]
60
  elif features[0, 0] < -0.5:
@@ -191,4 +189,71 @@ def process_input(video, audio):
191
  voice_emotion, voice_details = "neutral", {}
192
 
193
  # Update history and get outputs
194
- update_em
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  import requests
11
  import os
12
  from sklearn.preprocessing import StandardScaler
 
13
 
14
  # Download emotion recognition ONNX model
15
  MODEL_URL = "https://github.com/onnx/models/raw/main/vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.onnx"
 
25
  emotion_session = ort.InferenceSession(MODEL_PATH)
26
  emotion_labels = ['neutral', 'happy', 'surprise', 'sad', 'angry', 'disgust', 'fear', 'contempt']
27
 
28
+ # Simple voice emotion classifier
29
  class VoiceEmotionClassifier:
30
  def __init__(self):
31
  self.scaler = StandardScaler()
 
53
  features = self.scaler.transform(features)
54
 
55
  # Simple rule-based classifier (replace with actual trained model)
 
56
  if features[0, 0] > 0.5:
57
  return "happy", [{"label": "happy", "score": 0.8}]
58
  elif features[0, 0] < -0.5:
 
189
  voice_emotion, voice_details = "neutral", {}
190
 
191
  # Update history and get outputs
192
+ update_emotion_history(face_emotion, voice_emotion)
193
+ timeline_df = get_emotion_timeline()
194
+ advice = get_practitioner_advice(face_emotion, voice_emotion)
195
+
196
+ # Prepare outputs
197
+ outputs = {
198
+ "current_face": face_emotion,
199
+ "current_voice": voice_emotion,
200
+ "timeline": timeline_df,
201
+ "advice": advice,
202
+ "face_details": str(face_details),
203
+ "voice_details": str(voice_details)
204
+ }
205
+
206
+ return outputs
207
+ except Exception as e:
208
+ print(f"Processing error: {e}")
209
+ return {
210
+ "current_face": "Error",
211
+ "current_voice": "Error",
212
+ "timeline": pd.DataFrame(),
213
+ "advice": "System error occurred",
214
+ "face_details": "",
215
+ "voice_details": ""
216
+ }
217
+
218
+ # Gradio interface
219
+ with gr.Blocks(title="Patient Emotion Recognition", theme="soft") as demo:
220
+ gr.Markdown("# Real-Time Patient Emotion Recognition")
221
+ gr.Markdown("Analyze facial expressions and voice tone during medical consultations")
222
+
223
+ with gr.Row():
224
+ with gr.Column():
225
+ video_input = gr.Image(label="Live Camera Feed", source="webcam", streaming=True)
226
+ audio_input = gr.Audio(label="Voice Input", source="microphone", type="numpy")
227
+ submit_btn = gr.Button("Analyze Emotions")
228
+
229
+ with gr.Column():
230
+ current_face = gr.Textbox(label="Current Facial Emotion")
231
+ current_voice = gr.Textbox(label="Current Voice Emotion")
232
+ advice_output = gr.Textbox(label="Practitioner Suggestions", lines=3)
233
+ timeline_output = gr.Dataframe(label="Emotion Timeline", interactive=False)
234
+ face_details = gr.Textbox(label="Face Analysis Details", visible=False)
235
+ voice_details = gr.Textbox(label="Voice Analysis Details", visible=False)
236
+
237
+ # Live processing
238
+ video_input.change(
239
+ process_input,
240
+ inputs=[video_input, audio_input],
241
+ outputs=[current_face, current_voice, timeline_output, advice_output, face_details, voice_details],
242
+ show_progress="hidden"
243
+ )
244
+
245
+ audio_input.change(
246
+ process_input,
247
+ inputs=[video_input, audio_input],
248
+ outputs=[current_face, current_voice, timeline_output, advice_output, face_details, voice_details],
249
+ show_progress="hidden"
250
+ )
251
+
252
+ submit_btn.click(
253
+ process_input,
254
+ inputs=[video_input, audio_input],
255
+ outputs=[current_face, current_voice, timeline_output, advice_output, face_details, voice_details]
256
+ )
257
+
258
+ if __name__ == "__main__":
259
+ demo.launch(debug=True)