Commit 2f58ee5 · 1 Parent(s): 103895d
Regino committed

first commit

haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render.
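
haarcascade_frontalface_default.xml is OpenCV's stock pre-trained frontal-face Haar cascade, a large XML file, which is why the viewer skips rendering it. For reference, a minimal sketch of how such a cascade is consumed; the image path test.jpg is a placeholder, not part of this repo:

import cv2

# Minimal sketch: load the committed cascade and detect faces in one image.
# 'test.jpg' is a hypothetical path for illustration only.
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('test.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print(f"Detected {len(faces)} face(s)")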
 
models/emotion_model_best.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32bc4d63ae296293c472c3861da4af54c21f8c2432646f0a23a9a8b53b6ea255
+size 17770192
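
The .h5 weights are stored with Git LFS, so the commit records only this three-line pointer; `git lfs pull` fetches the actual file. Once fetched, the download can be checked against the pointer's oid and size. A minimal verification sketch:

import hashlib
import os

# Minimal sketch: verify the fetched LFS object against the pointer above.
path = 'models/emotion_model_best.h5'
expected_oid = '32bc4d63ae296293c472c3861da4af54c21f8c2432646f0a23a9a8b53b6ea255'
expected_size = 17770192  # bytes, from the pointer

assert os.path.getsize(path) == expected_size, "size mismatch (pointer not resolved?)"
sha = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS object verified")
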
requirements.txt CHANGED
@@ -1,3 +1,4 @@
-altair
-pandas
-streamlit
+streamlit
+opencv-python
+numpy
+tensorflow
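
The new requirements are unpinned, so installs resolve to the latest compatible releases. A quick import check confirms the environment before launching the app with `streamlit run src/streamlit_app.py`:

# Minimal environment check for the four runtime dependencies.
import cv2
import numpy
import streamlit
import tensorflow

for mod in (cv2, numpy, streamlit, tensorflow):
    print(mod.__name__, mod.__version__)
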
src/streamlit_app.py CHANGED
@@ -1,40 +1,174 @@
-import altair as alt
-import numpy as np
-import pandas as pd
+# app.py
+
 import streamlit as st
-
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+import cv2
+import numpy as np
+import tensorflow as tf
+import time
+import os
+
+# --- Streamlit Page Configuration (MUST BE THE FIRST STREAMLIT COMMAND) ---
+st.set_page_config(page_title="Real-time Emotion Recognition", layout="wide")
+
+# --- 1. Load Model and Face Detector (Cached for Performance) ---
+
+@st.cache_resource
+def load_emotion_model():
+    model_path = 'models/emotion_model_best.h5'  # Path to your trained model
+    if not os.path.exists(model_path):
+        st.error(f"Error: Model file not found at {model_path}. Please ensure training was successful and the file exists.")
+        st.stop()
+    try:
+        model = tf.keras.models.load_model(model_path)
+        return model
+    except Exception as e:
+        st.error(f"Error loading model from {model_path}: {e}")
+        st.stop()
+
+@st.cache_resource
+def load_face_detector():
+    cascade_path = 'haarcascade_frontalface_default.xml'  # Path to your Haar Cascade file
+    if not os.path.exists(cascade_path):
+        st.error(f"Error: Haar Cascade file not found at {cascade_path}.")
+        st.markdown("Please download `haarcascade_frontalface_default.xml` from:")
+        st.markdown("[https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml](https://github.com/opencv/opencv/blob/4.x/data/haarcascades/haarcascade_frontalface_default.xml)")
+        st.markdown("and place it next to `app.py`.")
+        st.stop()
+    face_cascade = cv2.CascadeClassifier(cascade_path)
+    if face_cascade.empty():
+        st.error(f"Error: Could not load Haar Cascade classifier from {cascade_path}. Check file integrity.")
+        st.stop()
+    return face_cascade
+
+# Load the model and face detector when the app starts
+model = load_emotion_model()
+face_detector = load_face_detector()
+
+# --- 2. Define Constants and Labels ---
+IMG_HEIGHT = 48
+IMG_WIDTH = 48
+emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+
+label_colors = {
+    'angry': (0, 0, 255),      # BGR Red
+    'disgust': (0, 165, 255),  # BGR Orange
+    'fear': (0, 255, 255),     # BGR Yellow
+    'happy': (0, 255, 0),      # BGR Green
+    'neutral': (255, 255, 0),  # BGR Cyan
+    'sad': (255, 0, 0),        # BGR Blue
+    'surprise': (255, 0, 255)  # BGR Magenta
+}
+
+# --- 3. Streamlit App Layout ---
+st.title("Live Facial Emotion Recognition")
+
+st.markdown("""
+This application uses a deep learning model (trained on FER-2013) to detect emotions from faces in real time.
+It requires access to your computer's webcam.
+""")
+
+stframe = st.empty()
+st_status = st.empty()
+
+col1, col2 = st.columns([1, 1])
+with col1:
+    start_button = st.button("Start Camera", key="start_camera")
+with col2:
+    stop_button = st.button("Stop Camera", key="stop_camera")
+
+# Initialize session state for camera control and performance tracking
+if "camera_started" not in st.session_state:
+    st.session_state.camera_started = False
+if "cap" not in st.session_state:
+    st.session_state.cap = None
+if "last_process_time" not in st.session_state:
+    st.session_state.last_process_time = 0.0
+
+# --- Performance Configuration ---
+DESIRED_FPS = 15  # Aim to process 15 frames per second
+FRAME_INTERVAL_SECONDS = 1.0 / DESIRED_FPS
+FACE_DETECTION_DOWNSCALE = 0.5  # Scale factor for face detection (e.g., 0.5 means half size)
+
+# --- 4. Main Camera Loop Logic ---
+
+if start_button:
+    st.session_state.camera_started = True
+
+if stop_button:
+    st.session_state.camera_started = False
+    st_status.info("Camera stopped.")
+    if st.session_state.cap is not None and st.session_state.cap.isOpened():
+        st.session_state.cap.release()
+    st.session_state.cap = None
+    stframe.empty()
+    # Updated: use_container_width instead of use_column_width
+    stframe.image(np.zeros((480, 640, 3), dtype=np.uint8), channels="RGB", use_container_width=True)
+
+if st.session_state.camera_started:
+    st_status.info("Starting camera... Please allow camera access if prompted.")
+
+    if st.session_state.cap is None or not st.session_state.cap.isOpened():
+        st.session_state.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # CAP_DSHOW selects the Windows DirectShow backend
+        if not st.session_state.cap.isOpened():
+            st_status.error("Failed to open camera. Please check if it's connected and not in use.")
+            st.session_state.camera_started = False
+            st.stop()
+
+    while st.session_state.camera_started:
+        ret, frame = st.session_state.cap.read()
+        if not ret:
+            st_status.error("Failed to read frame from camera. It might be disconnected, or an error occurred.")
+            st.session_state.camera_started = False
+            break
+
+        current_time = time.time()
+        # Throttle heavy processing to roughly DESIRED_FPS
+        if current_time - st.session_state.last_process_time >= FRAME_INTERVAL_SECONDS:
+            st.session_state.last_process_time = current_time
+
+            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+            # Detect faces on a downscaled copy for speed
+            small_frame = cv2.resize(gray_frame, (0, 0), fx=FACE_DETECTION_DOWNSCALE, fy=FACE_DETECTION_DOWNSCALE)
+
+            faces = face_detector.detectMultiScale(small_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+            # Map detections back to full-resolution coordinates
+            original_faces = []
+            for (x, y, w, h) in faces:
+                x_orig = int(x / FACE_DETECTION_DOWNSCALE)
+                y_orig = int(y / FACE_DETECTION_DOWNSCALE)
+                w_orig = int(w / FACE_DETECTION_DOWNSCALE)
+                h_orig = int(h / FACE_DETECTION_DOWNSCALE)
+                original_faces.append((x_orig, y_orig, w_orig, h_orig))
+
+            for (x, y, w, h) in original_faces:
+                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
+
+                face_roi = gray_frame[max(0, y):min(gray_frame.shape[0], y + h), max(0, x):min(gray_frame.shape[1], x + w)]
+
+                if face_roi.size == 0:
+                    continue
+
+                # Match the model's input: 48x48 grayscale, batch/channel dims, [0, 1] range
+                face_roi = cv2.resize(face_roi, (IMG_WIDTH, IMG_HEIGHT))
+                face_roi = np.expand_dims(face_roi, axis=0)
+                face_roi = np.expand_dims(face_roi, axis=-1)
+                face_roi = face_roi / 255.0
+
+                predictions = model.predict(face_roi, verbose=0)[0]
+                emotion_index = np.argmax(predictions)
+                predicted_emotion = emotion_labels[emotion_index]
+                confidence = predictions[emotion_index] * 100
+
+                text_color = label_colors.get(predicted_emotion, (255, 255, 255))
+                text = f"{predicted_emotion} ({confidence:.2f}%)"
+                text_y = y - 10 if y - 10 > 10 else y + h + 20
+                cv2.putText(frame, text, (x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.9, text_color, 2, cv2.LINE_AA)
+
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            # Updated: use_container_width instead of use_column_width
+            stframe.image(frame_rgb, channels="RGB", use_container_width=True)
+
+        time.sleep(0.001)  # Small sleep to yield control; can be adjusted or removed
+
+    if st.session_state.cap is not None and st.session_state.cap.isOpened():
+        st.session_state.cap.release()
+        st.session_state.cap = None
+        st_status.info("Camera released.")
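
As an offline sanity check before wiring up the webcam, the model can be exercised with the same preprocessing the app applies (48x48 grayscale, batch and channel dims, scaled to [0, 1]). A minimal sketch, assuming the repo layout above and a 7-class output head:

import numpy as np
import tensorflow as tf

# Minimal offline smoke test: no webcam, no Streamlit.
model = tf.keras.models.load_model('models/emotion_model_best.h5')

dummy = np.random.randint(0, 256, (48, 48), dtype=np.uint8)  # stand-in face ROI
x = dummy[np.newaxis, :, :, np.newaxis] / 255.0              # shape (1, 48, 48, 1)

probs = model.predict(x, verbose=0)[0]
labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
assert probs.shape == (7,)  # assumes the model outputs one score per label
print(labels[int(np.argmax(probs))], float(probs.max()))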