Regino committed on
Commit 500c313 · 1 Parent(s): 8b43db0
Files changed (5)
  1. README.md +2 -19
  2. Video_Presentation.txt +1 -0
  3. app(for local).py +174 -0
  4. requirements.txt +4 -1
  5. src/streamlit_app.py +101 -114
README.md CHANGED
@@ -1,20 +1,3 @@
- ---
- title: FinalProject
- emoji: 🚀
- colorFrom: red
- colorTo: red
- sdk: docker
- app_port: 8501
- tags:
- - streamlit
- pinned: false
- short_description: Real-time Facial Emotion Recognition System
- license: mit
- ---
-
- # Welcome to Streamlit!
-
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
-
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
+ Need to adjust things to be able to run on Hugging Face.
+
+ If you want to run locally, please use app(for local).py (for example, `streamlit run "app(for local).py"`).
Video_Presentation.txt ADDED
@@ -0,0 +1 @@
+ https://www.canva.com/design/DAGoMR6EmvM/Q0SFJu1gnHcW7B6oMkNn5A/watch?utm_content=DAGoMR6EmvM&utm_campaign=share_your_design&utm_medium=link2&utm_source=shareyourdesignpanel
app(for local).py ADDED
@@ -0,0 +1,174 @@
+ # app.py
+
+ import streamlit as st
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+ import time
+ import os
+
+ # --- Streamlit Page Configuration (MUST BE THE FIRST STREAMLIT COMMAND) ---
+ st.set_page_config(page_title="Real-time Emotion Recognition", layout="wide")
+
+ # --- 1. Load Model and Face Detector (Cached for Performance) ---
+
+ @st.cache_resource
+ def load_emotion_model():
+     model_path = 'models/emotion_model_best.h5'  # Path to your trained model
+     if not os.path.exists(model_path):
+         st.error(f"Error: Model file not found at {model_path}. Please ensure training was successful and the file exists.")
+         st.stop()
+     try:
+         model = tf.keras.models.load_model(model_path)
+         return model
+     except Exception as e:
+         st.error(f"Error loading model from {model_path}: {e}")
+         st.stop()
+
+ @st.cache_resource
+ def load_face_detector():
+     cascade_path = 'haarcascade_frontalface_default.xml'  # Path to your Haar Cascade file
+     if not os.path.exists(cascade_path):
+         st.error(f"Error: Haar Cascade file not found at {cascade_path}.")
+         st.markdown("Please download `haarcascade_frontalface_default.xml` from:")
+         st.markdown("[https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml](https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml)")
+         st.markdown("And place it in a `cascades` folder next to `app.py`.")
+         st.stop()
+     face_cascade = cv2.CascadeClassifier(cascade_path)
+     if face_cascade.empty():
+         st.error(f"Error: Could not load Haar Cascade classifier from {cascade_path}. Check file integrity.")
+         st.stop()
+     return face_cascade
+
+ # Load the model and face detector when the app starts
+ model = load_emotion_model()
+ face_detector = load_face_detector()
+
+ # --- 2. Define Constants and Labels ---
+ IMG_HEIGHT = 48
+ IMG_WIDTH = 48
+ emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+
+ label_colors = {
+     'angry': (0, 0, 255),      # BGR Red
+     'disgust': (0, 165, 255),  # BGR Orange
+     'fear': (0, 255, 255),     # BGR Yellow
+     'happy': (0, 255, 0),      # BGR Green
+     'neutral': (255, 255, 0),  # BGR Cyan
+     'sad': (255, 0, 0),        # BGR Blue
+     'surprise': (255, 0, 255)  # BGR Magenta
+ }
+
+ # --- 3. Streamlit App Layout ---
+ st.title("Live Facial Emotion Recognition")
+
+ st.markdown("""
+ This application uses a deep learning model (trained on FER-2013) to detect emotions from faces in real-time.
+ It requires access to your computer's webcam.
+ """)
+
+ stframe = st.empty()
+ st_status = st.empty()
+
+ col1, col2 = st.columns([1,1])
+ with col1:
+     start_button = st.button("Start Camera", key="start_camera")
+ with col2:
+     stop_button = st.button("Stop Camera", key="stop_camera")
+
+ # Initialize session state for camera control and performance tracking
+ if "camera_started" not in st.session_state:
+     st.session_state.camera_started = False
+ if "cap" not in st.session_state:
+     st.session_state.cap = None
+ if "last_process_time" not in st.session_state:
+     st.session_state.last_process_time = 0.0
+
+ # --- Performance Configuration ---
+ DESIRED_FPS = 15  # Aim for 15 frames per second for processing
+ FRAME_INTERVAL_SECONDS = 1.0 / DESIRED_FPS
+ FACE_DETECTION_DOWNSCALE = 0.5  # Scale factor for face detection (e.g., 0.5 means half size)
+
+ # --- 4. Main Camera Loop Logic ---
+
+ if start_button:
+     st.session_state.camera_started = True
+
+ if stop_button:
+     st.session_state.camera_started = False
+     st_status.info("Camera stopped.")
+     if st.session_state.cap is not None and st.session_state.cap.isOpened():
+         st.session_state.cap.release()
+         st.session_state.cap = None
+     stframe.empty()
+     # Updated: use_container_width instead of use_column_width
+     stframe.image(np.zeros((480, 640, 3), dtype=np.uint8), channels="RGB", use_container_width=True)
+
+ if st.session_state.camera_started:
+     st_status.info("Starting camera... Please allow camera access if prompted.")
+
+     if st.session_state.cap is None or not st.session_state.cap.isOpened():
+         st.session_state.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
+         if not st.session_state.cap.isOpened():
+             st_status.error("Failed to open camera. Please check if it's connected and not in use.")
+             st.session_state.camera_started = False
+             st.stop()
+
+     while st.session_state.camera_started:
+         ret, frame = st.session_state.cap.read()
+         if not ret:
+             st_status.error("Failed to read frame from camera. It might be disconnected or an error occurred.")
+             st.session_state.camera_started = False
+             break
+
+         current_time = time.time()
+         if current_time - st.session_state.last_process_time >= FRAME_INTERVAL_SECONDS:
+             st.session_state.last_process_time = current_time
+
+             gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+             small_frame = cv2.resize(gray_frame, (0, 0), fx=FACE_DETECTION_DOWNSCALE, fy=FACE_DETECTION_DOWNSCALE)
+
+             faces = face_detector.detectMultiScale(small_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+             original_faces = []
+             for (x, y, w, h) in faces:
+                 x_orig = int(x / FACE_DETECTION_DOWNSCALE)
+                 y_orig = int(y / FACE_DETECTION_DOWNSCALE)
+                 w_orig = int(w / FACE_DETECTION_DOWNSCALE)
+                 h_orig = int(h / FACE_DETECTION_DOWNSCALE)
+                 original_faces.append((x_orig, y_orig, w_orig, h_orig))
+
+             for (x, y, w, h) in original_faces:
+                 cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
+
+                 face_roi = gray_frame[max(0, y):min(gray_frame.shape[0], y+h), max(0, x):min(gray_frame.shape[1], x+w)]
+
+                 if face_roi.size == 0:
+                     continue
+
+                 face_roi = cv2.resize(face_roi, (IMG_WIDTH, IMG_HEIGHT))
+                 face_roi = np.expand_dims(face_roi, axis=0)
+                 face_roi = np.expand_dims(face_roi, axis=-1)
+                 face_roi = face_roi / 255.0
+
+                 predictions = model.predict(face_roi, verbose=0)[0]
+                 emotion_index = np.argmax(predictions)
+                 predicted_emotion = emotion_labels[emotion_index]
+                 confidence = predictions[emotion_index] * 100
+
+                 text_color = label_colors.get(predicted_emotion, (255, 255, 255))
+                 text = f"{predicted_emotion} ({confidence:.2f}%)"
+                 text_y = y - 10 if y - 10 > 10 else y + h + 20
+                 cv2.putText(frame, text, (x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, text_color, 2, cv2.LINE_AA)
+
+             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             # Updated: use_container_width instead of use_column_width
+             stframe.image(frame_rgb, channels="RGB", use_container_width=True)
+
+         time.sleep(0.001)  # Small sleep to yield control, can be adjusted or removed
+
+     if st.session_state.cap is not None and st.session_state.cap.isOpened():
+         st.session_state.cap.release()
+         st.session_state.cap = None
+     st_status.info("Camera released.")
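For a quick, webcam-free sanity check of the model and cascade paths used above, the sketch below runs the same 48x48 grayscale preprocessing on a single image. It assumes the same file layout as app(for local).py (models/emotion_model_best.h5 and haarcascade_frontalface_default.xml in the working directory); the script name and test image path are placeholders, not part of this commit.

# smoke_test.py -- hypothetical helper for checking the model without a camera
import cv2
import numpy as np
import tensorflow as tf

IMG_HEIGHT, IMG_WIDTH = 48, 48
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']

model = tf.keras.models.load_model('models/emotion_model_best.h5')           # same path as the app
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')  # same path as the app

img = cv2.imread('test_face.jpg')  # placeholder image path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

for (x, y, w, h) in faces:
    roi = cv2.resize(gray[y:y+h, x:x+w], (IMG_WIDTH, IMG_HEIGHT))
    roi = roi[np.newaxis, :, :, np.newaxis] / 255.0  # shape (1, 48, 48, 1), normalized like the app
    probs = model.predict(roi, verbose=0)[0]
    print(emotion_labels[int(np.argmax(probs))], f"{probs.max() * 100:.1f}%")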
requirements.txt CHANGED
@@ -1,4 +1,7 @@
 
+ # requirements.txt
  streamlit
  opencv-python
+ tensorflow  # or tensorflow-cpu
  numpy
- tensorflow
+ streamlit-webrtc  # <-- NEW
+ # Add any other libraries your app uses
src/streamlit_app.py CHANGED
@@ -4,8 +4,9 @@ import streamlit as st
  import cv2
  import numpy as np
  import tensorflow as tf
- import time
  import os
+ from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, WebRtcMode
+ import av  # Part of streamlit-webrtc's dependencies for frame handling
  
  # --- Streamlit Page Configuration (MUST BE THE FIRST STREAMLIT COMMAND) ---
  st.set_page_config(page_title="Real-time Emotion Recognition", layout="wide")
@@ -14,9 +15,14 @@ st.set_page_config(page_title="Real-time Emotion Recognition", layout="wide")
  
  @st.cache_resource
  def load_emotion_model():
-     model_path = 'models/emotion_model_best.h5'  # Path to your trained model
+     # Path to your trained model.
+     # In a Docker container, the app's working directory will be /app.
+     # So if your models folder is at /app/models, then 'models/...' is correct.
+     # Ensure your Dockerfile copies the 'models' folder correctly.
+     model_path = 'models/emotion_model_best.h5'
+ 
      if not os.path.exists(model_path):
-         st.error(f"Error: Model file not found at {model_path}. Please ensure training was successful and the file exists.")
+         st.error(f"Error: Model file not found at {model_path}. Please ensure it's copied into the Docker image and path is correct.")
          st.stop()
      try:
          model = tf.keras.models.load_model(model_path)
@@ -27,12 +33,15 @@ def load_emotion_model():
  
  @st.cache_resource
  def load_face_detector():
-     cascade_path = 'haarcascade_frontalface_default.xml'  # Path to your Haar Cascade file
+     # Path to your Haar Cascade file.
+     # Ensure 'haarcascade_frontalface_default.xml' is in the root of your project
+     # directory (which is copied to /app in Docker) for this path to be correct.
+     cascade_path = 'haarcascade_frontalface_default.xml'
+ 
      if not os.path.exists(cascade_path):
          st.error(f"Error: Haar Cascade file not found at {cascade_path}.")
-         st.markdown("Please download `haarcascade_frontalface_default.xml` from:")
-         st.markdown("[https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml](https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml)")
-         st.markdown("And place it in a `cascades` folder next to `app.py`.")
+         st.markdown("Please ensure `haarcascade_frontalface_default.xml` is in the root of your project directory alongside `src/` and `models/`.")
+         st.markdown("Download from: [https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml](https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml)")
          st.stop()
      face_cascade = cv2.CascadeClassifier(cascade_path)
      if face_cascade.empty():
@@ -59,116 +68,94 @@ label_colors = {
      'surprise': (255, 0, 255)  # BGR Magenta
  }
  
- # --- 3. Streamlit App Layout ---
- st.title("Live Facial Emotion Recognition")
-
- st.markdown("""
- This application uses a deep learning model (trained on FER-2013) to detect emotions from faces in real-time.
- It requires access to your computer's webcam.
- """)
-
- stframe = st.empty()
- st_status = st.empty()
-
- col1, col2 = st.columns([1,1])
- with col1:
-     start_button = st.button("Start Camera", key="start_camera")
- with col2:
-     stop_button = st.button("Stop Camera", key="stop_camera")
-
- # Initialize session state for camera control and performance tracking
- if "camera_started" not in st.session_state:
-     st.session_state.camera_started = False
- if "cap" not in st.session_state:
-     st.session_state.cap = None
- if "last_process_time" not in st.session_state:
-     st.session_state.last_process_time = 0.0
-
- # --- Performance Configuration ---
- DESIRED_FPS = 15  # Aim for 15 frames per second for processing
- FRAME_INTERVAL_SECONDS = 1.0 / DESIRED_FPS
- FACE_DETECTION_DOWNSCALE = 0.5  # Scale factor for face detection (e.g., 0.5 means half size)
-
- # --- 4. Main Camera Loop Logic ---
-
- if start_button:
-     st.session_state.camera_started = True
-
- if stop_button:
-     st.session_state.camera_started = False
-     st_status.info("Camera stopped.")
-     if st.session_state.cap is not None and st.session_state.cap.isOpened():
-         st.session_state.cap.release()
-         st.session_session.cap = None
-     stframe.empty()
-     # Updated: use_container_width instead of use_column_width
-     stframe.image(np.zeros((480, 640, 3), dtype=np.uint8), channels="RGB", use_container_width=True)
-
- if st.session_state.camera_started:
-     st_status.info("Starting camera... Please allow camera access if prompted.")
-
-     if st.session_state.cap is None or not st.session_state.cap.isOpened():
-         st.session_state.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
-         if not st.session_state.cap.isOpened():
-             st_status.error("Failed to open camera. Please check if it's connected and not in use.")
-             st.session_state.camera_started = False
-             st.stop()
-
-     while st.session_state.camera_started:
-         ret, frame = st.session_state.cap.read()
-         if not ret:
-             st_status.error("Failed to read frame from camera. It might be disconnected or an error occurred.")
-             st.session_state.camera_started = False
-             break
-
-         current_time = time.time()
-         if current_time - st.session_state.last_process_time >= FRAME_INTERVAL_SECONDS:
-             st.session_state.last_process_time = current_time
-
-             gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-             small_frame = cv2.resize(gray_frame, (0, 0), fx=FACE_DETECTION_DOWNSCALE, fy=FACE_DETECTION_DOWNSCALE)
-
-             faces = face_detector.detectMultiScale(small_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
-
-             original_faces = []
-             for (x, y, w, h) in faces:
-                 x_orig = int(x / FACE_DETECTION_DOWNSCALE)
-                 y_orig = int(y / FACE_DETECTION_DOWNSCALE)
-                 w_orig = int(w / FACE_DETECTION_DOWNSCALE)
-                 h_orig = int(h / FACE_DETECTION_DOWNSCALE)
-                 original_faces.append((x_orig, y_orig, w_orig, h_orig))
-
-             for (x, y, w, h) in original_faces:
-                 cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
-
-                 face_roi = gray_frame[max(0, y):min(gray_frame.shape[0], y+h), max(0, x):min(gray_frame.shape[1], x+w)]
-
-                 if face_roi.size == 0:
-                     continue
-
-                 face_roi = cv2.resize(face_roi, (IMG_WIDTH, IMG_HEIGHT))
-                 face_roi = np.expand_dims(face_roi, axis=0)
-                 face_roi = np.expand_dims(face_roi, axis=-1)
-                 face_roi = face_roi / 255.0
-
-                 predictions = model.predict(face_roi, verbose=0)[0]
-                 emotion_index = np.argmax(predictions)
-                 predicted_emotion = emotion_labels[emotion_index]
-                 confidence = predictions[emotion_index] * 100
-
-                 text_color = label_colors.get(predicted_emotion, (255, 255, 255))
-                 text = f"{predicted_emotion} ({confidence:.2f}%)"
-                 text_y = y - 10 if y - 10 > 10 else y + h + 20
-                 cv2.putText(frame, text, (x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, text_color, 2, cv2.LINE_AA)
-
-             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-             # Updated: use_container_width instead of use_column_width
-             stframe.image(frame_rgb, channels="RGB", use_container_width=True)
-
-         time.sleep(0.001)  # Small sleep to yield control, can be adjusted or removed
-
-     if st.session_state.cap is not None and st.session_state.cap.isOpened():
-         st.session_state.cap.release()
-         st.session_state.cap = None
-     st_status.info("Camera released.")
+ FACE_DETECTION_DOWNSCALE = 0.5  # Scale factor for face detection
+ 
+ # --- 3. Video Processing Class ---
+ # This class will receive frames from the client and process them on the server
+ class EmotionDetector(VideoTransformerBase):
+     def __init__(self, model, face_detector):
+         self.model = model
+         self.face_detector = face_detector
+ 
+     def transform(self, frame: av.VideoFrame) -> np.ndarray:
+         # Convert av.VideoFrame to NumPy array.
+         # Requesting "bgr24" format directly from `av` to align with OpenCV's default.
+         img_bgr = frame.to_ndarray(format="bgr24")  # <--- MODIFIED TO BGR24
+ 
+         # Convert to grayscale for face detection and emotion prediction
+         gray_frame = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
+ 
+         # Scale down for faster face detection
+         small_frame = cv2.resize(gray_frame, (0, 0), fx=FACE_DETECTION_DOWNSCALE, fy=FACE_DETECTION_DOWNSCALE)
+ 
+         # Detect faces
+         faces = self.face_detector.detectMultiScale(small_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+ 
+         # Scale face coordinates back to original frame size
+         original_faces = []
+         for (x, y, w, h) in faces:
+             x_orig = int(x / FACE_DETECTION_DOWNSCALE)
+             y_orig = int(y / FACE_DETECTION_DOWNSCALE)
+             w_orig = int(w / FACE_DETECTION_DOWNSCALE)
+             h_orig = int(h / FACE_DETECTION_DOWNSCALE)
+             original_faces.append((x_orig, y_orig, w_orig, h_orig))
+ 
+         # Process each detected face
+         for (x, y, w, h) in original_faces:
+             # Draw rectangle on the BGR image (img_bgr)
+             cv2.rectangle(img_bgr, (x, y), (x+w, y+h), (255, 0, 0), 2)
+ 
+             # Extract face ROI for emotion prediction
+             # Ensure ROI coordinates are within image bounds
+             face_roi = gray_frame[max(0, y):min(gray_frame.shape[0], y+h), max(0, x):min(gray_frame.shape[1], x+w)]
+ 
+             if face_roi.size == 0:  # Skip if ROI is empty (e.g., face partially out of frame)
+                 continue
+ 
+             face_roi = cv2.resize(face_roi, (IMG_WIDTH, IMG_HEIGHT))
+             face_roi = np.expand_dims(face_roi, axis=0)   # Add batch dimension
+             face_roi = np.expand_dims(face_roi, axis=-1)  # Add channel dimension (for grayscale)
+             face_roi = face_roi / 255.0                   # Normalize pixel values
+ 
+             predictions = self.model.predict(face_roi, verbose=0)[0]
+             emotion_index = np.argmax(predictions)
+             predicted_emotion = emotion_labels[emotion_index]
+             confidence = predictions[emotion_index] * 100
+ 
+             text_color = label_colors.get(predicted_emotion, (255, 255, 255))
+             text = f"{predicted_emotion} ({confidence:.2f}%)"
+ 
+             # Position text above face, or below if not enough space above
+             text_y = y - 10 if y - 10 > 10 else y + h + 20
+ 
+             # Draw text on the BGR image (img_bgr)
+             cv2.putText(img_bgr, text, (x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, text_color, 2, cv2.LINE_AA)
+ 
+         # Convert the processed BGR image back to RGB for Streamlit/WebRTC display
+         return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+ 
+ # --- 4. Streamlit App Layout and WebRTC Stream ---
+ st.title("Live Facial Emotion Recognition")
+ 
+ st.markdown("""
+ This application uses a deep learning model to detect emotions from faces in real-time.
+ It accesses your webcam directly via your browser (WebRTC) and processes the video frames on the server.
+ """)
+ 
+ # Place the webrtc_streamer widget.
+ # It automatically renders a video player and "Connect" / "Disconnect" buttons.
+ webrtc_ctx = webrtc_streamer(
+     key="emotion_detection_stream",
+     mode=WebRtcMode.SENDRECV,  # Send video from client, receive processed video from server
+     video_processor_factory=lambda: EmotionDetector(model, face_detector),
+     media_stream_constraints={"video": True, "audio": False},  # Only video, no audio
+     async_processing=True,  # Process frames asynchronously
+     # desired_playing_state={"playing": True},  # Optional: tries to auto-start. Can comment out.
+ )
+ 
+ # Provide feedback based on the stream state
+ if webrtc_ctx.state.playing:
+     st.success("Webcam stream active. Looking for faces...")
+ else:
+     st.info("Webcam stream not active. Click the 'Start' button above to begin, and allow camera access.")
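One version-dependent caveat for the new src/streamlit_app.py: recent streamlit-webrtc releases deprecate VideoTransformerBase and transform() in favor of VideoProcessorBase with a recv() method that returns an av.VideoFrame. The sketch below is only a hedged adapter under that assumption, not part of this commit; it reuses the EmotionDetector class and the cached model/face_detector defined above, and the explicit format argument makes the intended color order unambiguous.

# Hypothetical adapter for newer streamlit-webrtc versions (API availability assumed)
import av
from streamlit_webrtc import VideoProcessorBase, WebRtcMode, webrtc_streamer

class EmotionProcessor(VideoProcessorBase):
    def __init__(self):
        # Wrap the EmotionDetector defined in src/streamlit_app.py above.
        self.detector = EmotionDetector(model, face_detector)

    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        # transform() returns an RGB ndarray; rebuild a frame and state the format explicitly.
        out_rgb = self.detector.transform(frame)
        return av.VideoFrame.from_ndarray(out_rgb, format="rgb24")

webrtc_ctx = webrtc_streamer(
    key="emotion_detection_stream",  # same key as above if this replaces the old call
    mode=WebRtcMode.SENDRECV,
    video_processor_factory=EmotionProcessor,
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)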