yunusajib committed (verified)
Commit d17fe0c · Parent: f158a4a

emotional app

Files changed (1): app.py +65 -25
app.py CHANGED
@@ -1,43 +1,83 @@
 import gradio as gr
 import numpy as np
+import time
+from datetime import datetime
 
-class AudioProcessor:
+class EmotionRecognizer:
     def __init__(self):
         self.sample_rate = 16000
+        self.emotion_history = []
+
+    def analyze_audio(self, audio_data):
+        # Mock audio analysis - replace with your actual model
+        emotions = {
+            'happy': np.random.random() * 0.5,
+            'sad': np.random.random() * 0.3,
+            'angry': np.random.random() * 0.2,
+            'neutral': np.random.random() * 0.5
+        }
+        return emotions
+
+    def analyze_image(self, image):
+        # Mock image analysis - replace with your actual model
+        emotions = {
+            'happy': np.random.random() * 0.6,
+            'confused': np.random.random() * 0.4,
+            'pain': np.random.random() * 0.3,
+            'neutral': np.random.random() * 0.5
+        }
+        return emotions
 
-    def process_audio(self, audio_input):
-        # This now has properly matched parentheses
-        return (np.random.random(self.sample_rate * 3), self.sample_rate)  # Both parentheses are closed
+    def process_inputs(self, video_frame, audio_data):
+        # Get current timestamp
+        timestamp = datetime.now().strftime("%H:%M:%S")
+
+        # Process inputs (mock implementation)
+        audio_emotions = self.analyze_audio(audio_data) if audio_data else {}
+        visual_emotions = self.analyze_image(video_frame) if video_frame else {}
+
+        # Combine results
+        combined = {**audio_emotions, **visual_emotions}
+        self.emotion_history.append((timestamp, combined))
+
+        # Generate outputs
+        top_emotion = max(combined.items(), key=lambda x: x[1]) if combined else ('none', 0)
+        stats = f"Top emotion: {top_emotion[0]} ({top_emotion[1]:.2f})"
+        history = "\n".join([f"{t}: {e}" for t, e in self.emotion_history[-3:]])
+
+        return stats, history
 
 def create_interface():
-    processor = AudioProcessor()
+    recognizer = EmotionRecognizer()
 
-    def process_audio(audio):
+    def process_frame(video_frame, audio_data):
         try:
-            audio_data, sr = processor.process_audio(audio)
-            status_msg = f"Processed {len(audio_data)} samples"
-            info_msg = f"{sr}Hz, {len(audio_data)/sr:.2f} seconds"
-            return status_msg, info_msg  # Two separate strings
+            stats, history = recognizer.process_inputs(video_frame, audio_data)
+            return stats, history
         except Exception as e:
-            return f"Error: {str(e)}", "No info available"  # Still two values
+            return f"Error: {str(e)}", "No history available"
 
-    with gr.Blocks() as demo:
-        gr.Markdown("## Fixed Audio Processor")
-
-        mic = gr.Audio(sources=["microphone"], type="filepath")
-        btn = gr.Button("Process Audio")
+    with gr.Blocks(title="Emotion Recognition", theme=gr.themes.Soft()) as app:
+        gr.Markdown("# Patient Emotion Recognition System")
 
-        status = gr.Textbox(label="Status")
-        info = gr.Textbox(label="Audio Info")
+        with gr.Row():
+            with gr.Column():
+                video_input = gr.Image(sources=["webcam"], label="Video Feed")
+                audio_input = gr.Audio(sources=["microphone"], label="Audio Input")
+                process_btn = gr.Button("Analyze", variant="primary")
+
+            with gr.Column():
+                stats_output = gr.Textbox(label="Current Analysis")
+                history_output = gr.Textbox(label="Recent History", lines=4)
 
-        btn.click(
-            fn=process_audio,
-            inputs=[mic],
-            outputs=[status, info]
+        process_btn.click(
+            fn=process_frame,
+            inputs=[video_input, audio_input],
+            outputs=[stats_output, history_output]
        )
 
-    return demo
+    return app
 
 if __name__ == "__main__":
-    demo = create_interface()
-    demo.launch()
+    app = create_interface()
+    app.launch()
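
One caveat in the new code: Gradio's gr.Image and gr.Audio components return NumPy data by default (an ndarray for the image, a (sample_rate, ndarray) tuple for the audio), and truth-testing a multi-element ndarray, as "if video_frame" does in process_inputs, raises "ValueError: The truth value of an array with more than one element is ambiguous", which the try/except in process_frame then reports as an error on every click. A minimal sketch of the fix, reusing the names from the diff above:

    # Inside EmotionRecognizer.process_inputs: compare against None rather than
    # truth-testing, since bool(ndarray) raises ValueError for a webcam frame.
    audio_emotions = self.analyze_audio(audio_data) if audio_data is not None else {}
    visual_emotions = self.analyze_image(video_frame) if video_frame is not None else {}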
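
Both analyzers are stubbed with np.random scores and marked "replace with your actual model". As one hedged illustration of that swap, not part of this commit, analyze_audio could be backed by a pretrained speech-emotion classifier from transformers; the checkpoint name and the dependencies (a recent transformers release whose audio pipelines accept {"raw", "sampling_rate"} dicts, plus torchaudio for resampling mic-rate audio) are assumptions:

    import numpy as np
    from transformers import pipeline  # assumed dependency, not in the commit

    class EmotionRecognizer:
        # Sketch: only the changed pieces are shown. The checkpoint
        # "superb/hubert-large-superb-er" is one public speech-emotion model;
        # any audio-classification checkpoint slots in the same way.
        def __init__(self):
            self.sample_rate = 16000
            self.emotion_history = []
            self.audio_model = pipeline("audio-classification",
                                        model="superb/hubert-large-superb-er")

        def analyze_audio(self, audio_data):
            sr, samples = audio_data                 # Gradio Audio: (rate, ndarray)
            samples = samples.astype(np.float32)
            if samples.ndim > 1:                     # downmix stereo to mono
                samples = samples.mean(axis=1)
            samples /= max(float(np.abs(samples).max()), 1e-9)  # scale to [-1, 1]
            scores = self.audio_model({"sampling_rate": sr, "raw": samples})
            return {s["label"]: s["score"] for s in scores}

The returned labels (neu, hap, ang, sad for this checkpoint) differ from the mock's keys, which is harmless here: process_inputs only takes the max over whatever dict comes back.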