maxinethegreat committed on
Commit
0dad691
·
1 Parent(s): f3340c3

test live detection

Browse files
Files changed (1) hide show
  1. app.py +19 -15
app.py CHANGED
@@ -13,19 +13,23 @@ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_fronta
13
  emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
14
 
15
  # Define the predict_emotion function
16
def predict_emotion(frame):
    """Annotate every detected face in *frame* with its predicted emotion.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image as produced by OpenCV capture.

    Returns
    -------
    numpy.ndarray
        The same frame, with a label and bounding box drawn per detected face.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (left, top, width, height) in detections:
        # Crop the face region and scale it to the model's 48x48 input size.
        roi = cv2.resize(gray[top:top + height, left:left + width], (48, 48))
        # Add channel then batch axes: (48, 48) -> (1, 48, 48, 1).
        batch = np.expand_dims(np.expand_dims(roi, -1), 0)
        scores = model.predict(batch)
        label = emotions[int(np.argmax(scores))]
        cv2.putText(frame, label, (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.rectangle(frame, (left, top),
                      (left + width, top + height), (255, 0, 0), 2)
    return frame
 
 
 
 
29
 
30
  # Start the video capture and emotion detection
31
  # cap = cv2.VideoCapture(0)
@@ -40,10 +44,10 @@ def predict_emotion(frame):
40
  # cv2.destroyAllWindows()
41
 
42
 
43
- input_image = gr.inputs.Image(source = "webcam", label="Your Face")
44
  # video = gr.inputs.Video(source = "webcam" )
45
 
46
- output_image = gr.inputs.Image(type = "numpy", label="Detected Emotion" )
47
 
48
 
49
 
 
13
  emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
14
 
15
  # Define the predict_emotion function
16
def predict_emotion(vid):
    """Read the first frame of a video, detect faces, and label each with a
    predicted emotion.

    Parameters
    ----------
    vid : str
        Path (or any source accepted by ``cv2.VideoCapture``) to the video.

    Returns
    -------
    numpy.ndarray or None
        The annotated BGR frame, or ``None`` when no frame could be read
        (bad path, empty stream).
    """
    cap = cv2.VideoCapture(vid)
    try:
        ret, frame = cap.read()
        if not ret:
            # Explicitly signal "no frame" instead of falling off the end.
            return None
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            face = gray[y:y+h, x:x+w]
            # Model expects 48x48 grayscale with channel and batch axes.
            face = cv2.resize(face, (48, 48))
            face = np.expand_dims(face, axis=-1)
            face = np.expand_dims(face, axis=0)
            prediction = model.predict(face)
            emotion = emotions[np.argmax(prediction)]
            cv2.putText(frame, emotion, (x, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        return frame
    finally:
        # Release the capture handle so the video file/device isn't leaked.
        cap.release()
33
 
34
  # Start the video capture and emotion detection
35
  # cap = cv2.VideoCapture(0)
 
44
  # cv2.destroyAllWindows()
45
 
46
 
47
# Webcam video input; Gradio hands predict_emotion the recorded file's path.
input_image = gr.inputs.Video(source="webcam", label="Your Face")

# BUG FIX: the output component must come from gr.outputs, not gr.inputs —
# legacy Gradio Interface rejects an inputs component in the outputs slot.
output_image = gr.outputs.Image(label="Detected Emotion")
51
 
52
 
53