Regino committed
Commit 9ef5697 · 1 Parent(s): 6f5fce0
Files changed (2):
  1. requirements.txt (+4 -4)
  2. src/streamlit_app.py (+28 -15)
requirements.txt CHANGED
@@ -1,7 +1,7 @@
-# requirements.txt
 streamlit
 opencv-python
-tensorflow # or tensorflow-cpu
+tensorflow-cpu # <-- Use this if your Hugging Face Space is CPU-only. Otherwise, keep 'tensorflow'.
 numpy
-streamlit-webrtc # <-- NEW
-# Add any other libraries your app uses
+streamlit-webrtc
+av
+Pillow
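Not part of the commit: a minimal smoke test for the updated dependency list, useful after `pip install -r requirements.txt` in the Space's environment. The pip-name-to-import-name mapping below is an assumption based on the packages listed above, and the script itself is purely illustrative.

```python
# Hypothetical smoke test: confirm each package from requirements.txt imports cleanly.
import importlib

# pip package name -> module it is imported as (mapping assumed, not from the repo)
PACKAGES = {
    "streamlit": "streamlit",
    "opencv-python": "cv2",
    "tensorflow-cpu": "tensorflow",
    "numpy": "numpy",
    "streamlit-webrtc": "streamlit_webrtc",
    "av": "av",
    "Pillow": "PIL",
}

for pip_name, module_name in PACKAGES.items():
    module = importlib.import_module(module_name)
    version = getattr(module, "__version__", "unknown")
    print(f"{pip_name}: import '{module_name}' OK (version {version})")
```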
src/streamlit_app.py CHANGED
@@ -80,7 +80,7 @@ class EmotionDetector(VideoTransformerBase):
     def transform(self, frame: av.VideoFrame) -> np.ndarray:
         # Convert av.VideoFrame to NumPy array.
         # Requesting "bgr24" format directly from `av` to align with OpenCV's default.
-        img_bgr = frame.to_ndarray(format="bgr24") # <--- MODIFIED TO BGR24
+        img_bgr = frame.to_ndarray(format="bgr24")
 
         # Convert to grayscale for face detection and emotion prediction
         gray_frame = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
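A small sketch, outside the diff, of the PyAV/OpenCV round trip this hunk relies on. The dummy frame size is made up, but `av.VideoFrame.from_ndarray`/`to_ndarray` with `format="bgr24"` are the calls in question.

```python
# Sketch: "bgr24" keeps the channel order OpenCV expects, so no RGB<->BGR swap is needed.
import av
import cv2
import numpy as np

# Dummy 480x640 frame standing in for what streamlit-webrtc passes to transform().
dummy = np.zeros((480, 640, 3), dtype=np.uint8)
frame = av.VideoFrame.from_ndarray(dummy, format="bgr24")

img_bgr = frame.to_ndarray(format="bgr24")        # channels come back in B, G, R order
gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)  # same call as in transform()
print(img_bgr.shape, gray.shape)                  # (480, 640, 3) (480, 640)
```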
@@ -97,7 +97,7 @@ class EmotionDetector(VideoTransformerBase):
             x_orig = int(x / FACE_DETECTION_DOWNSCALE)
             y_orig = int(y / FACE_DETECTION_DOWNSCALE)
             w_orig = int(w / FACE_DETECTION_DOWNSCALE)
-            h_orig = int(h / FACE_DETECTION_DOWNSCALE) # Corrected potential typo here if original was h_orig / h_orig
+            h_orig = int(h / FACE_DETECTION_DOWNSCALE)
             original_faces.append((x_orig, y_orig, w_orig, h_orig))
 
         # Process each detected face
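For context (not in the repo), a standalone sketch of the downscale-then-rescale pattern behind `FACE_DETECTION_DOWNSCALE`: detection runs on a shrunken frame for speed, and each box coordinate is divided by the downscale factor to map it back to full resolution. The constant's value and the `rescale_boxes` helper here are illustrative.

```python
FACE_DETECTION_DOWNSCALE = 0.5  # illustrative value, not the app's actual constant

def rescale_boxes(faces, scale):
    """Map (x, y, w, h) boxes from the downscaled frame back to original coordinates."""
    original_faces = []
    for (x, y, w, h) in faces:
        x_orig = int(x / scale)
        y_orig = int(y / scale)
        w_orig = int(w / scale)
        h_orig = int(h / scale)  # the hunk's cleanup: h is divided, just like x, y, w
        original_faces.append((x_orig, y_orig, w_orig, h_orig))
    return original_faces

# A box found at (50, 40, 30, 30) on the half-size frame maps to (100, 80, 60, 60).
print(rescale_boxes([(50, 40, 30, 30)], FACE_DETECTION_DOWNSCALE))
```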
@@ -149,19 +149,32 @@ webrtc_ctx = webrtc_streamer(
     mode=WebRtcMode.SENDRECV, # Send video from client, receive processed video from server
     video_processor_factory=lambda: EmotionDetector(model, face_detector),
     media_stream_constraints={"video": True, "audio": False}, # Only video, no audio
-    async_processing=True, # Process frames asynchronously
-    # desired_playing_state={"playing": True}, # Optional: tries to auto-start. Can comment out.
-
-    # --- ADD THIS LINE HERE ---
-    rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]},
-    # You can add more STUN servers for redundancy if needed, like:
-    # rtc_configuration={
-    #     "iceServers": [
-    #         {"urls": ["stun:stun.l.google.com:19302"]},
-    #         {"urls": ["stun:stun1.l.google.com:19302"]},
-    #         {"urls": ["stun:stun.services.mozilla.com"]}
-    #     ]
-    # },
+
+    # Temporarily set async_processing to False for debugging.
+    # If this works, it points to an asyncio interaction issue. For better performance,
+    # you'd ideally re-enable async_processing=True later.
+    async_processing=False,
+
+    # Optional: tries to auto-start. Can comment out if you prefer manual start.
+    # desired_playing_state={"playing": True},
+
+    # --- ENHANCED RTC CONFIGURATION ---
+    # Providing a robust list of public STUN servers for better NAT traversal
+    rtc_configuration={
+        "iceServers": [
+            {"urls": ["stun:stun.l.google.com:19302"]},
+            {"urls": ["stun:stun1.l.google.com:19302"]},
+            {"urls": ["stun:stun2.l.google.com:19302"]},
+            {"urls": ["stun:stun3.l.google.com:19302"]},
+            {"urls": ["stun:stun4.l.google.com:19302"]},
+            {"urls": ["stun:stun.services.mozilla.com"]},
+            {"urls": ["stun:global.stun.twilio.com:3478"]},
+            {"urls": ["stun:stun.nextcloud.com:3478"]}, # Added another
+            {"urls": ["stun:stun.schlund.de"]}, # Added another
+        ]
+    },
+    # For more detailed debugging in the browser console, uncomment the line below.
+    # log_level="debug",
 )
 
 # Provide feedback based on the stream state
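As a usage reference (not taken from src/streamlit_app.py), here is a self-contained `webrtc_streamer` call with the same SENDRECV mode, synchronous processing, and STUN-only `rtc_configuration` as the diff. The `key`, the `Passthrough` processor, and the trimmed server list are assumptions for the sketch; the real app swaps in `EmotionDetector` and the full server list.

```python
# Minimal Streamlit app to verify that ICE negotiation works with a STUN-only config.
import av
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

class Passthrough:
    """Stand-in processor; the real app supplies EmotionDetector instead."""
    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
        return frame  # echo frames back unchanged

webrtc_ctx = webrtc_streamer(
    key="stun-check",  # illustrative key
    mode=WebRtcMode.SENDRECV,
    video_processor_factory=Passthrough,
    media_stream_constraints={"video": True, "audio": False},
    async_processing=False,  # same debugging setting as the diff
    rtc_configuration={
        "iceServers": [
            {"urls": ["stun:stun.l.google.com:19302"]},
            {"urls": ["stun:stun1.l.google.com:19302"]},
        ]
    },
)

if webrtc_ctx.state.playing:
    st.write("Stream is playing: NAT traversal via the STUN servers succeeded.")
else:
    st.write("Click START and allow camera access to test the connection.")
```

If the stream never reaches the playing state behind a restrictive network, adding extra STUN servers usually does not help; STUN cannot traverse symmetric NATs, and the usual next step is to add a TURN relay server alongside the STUN entries.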