import gradio as gr
import tensorflow as tf
import cv2
import numpy as np
# Load the saved model
model = tf.keras.models.load_model('model/model.h5')
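# Assumption: model.h5 is a CNN trained on 48x48 grayscale face crops with a
# 7-way softmax output, matching the preprocessing and label list below.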
# Define the face cascade and emotions
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
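# This label order must match the class order used at training time
# (it follows the standard FER2013 ordering).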
no_face_detection_alert = "Cannot Detect Face"
low_confidence_alert = "Cannot Detect Emotion"
# Define the predict_emotion function: detect faces in the frame, classify
# each one, and draw the result back onto the frame.
def predict_emotion(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Typical Haar cascade settings; tune scaleFactor/minNeighbors for your camera.
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        face = gray[y:y + h, x:x + w]
        # The model expects 48x48 grayscale inputs.
        face = cv2.resize(face, (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum(face) != 0:
            face = face.astype('float') / 255.0
            face = tf.keras.utils.img_to_array(face)
            face = np.expand_dims(face, axis=0)
            prediction = model.predict(face)
            # Only label the face when the model is reasonably confident.
            if any(prob > .5 for prob in prediction[0]):
                emotion = emotions[np.argmax(prediction)]
                cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (128, 128, 0), 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
            else:
                cv2.putText(frame, low_confidence_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 2)
        else:
            # The crop was all black, so treat it as an unusable face region.
            cv2.putText(frame, no_face_detection_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 69, 255), 2)
    return frame
# Start the video capture and emotion detection (local OpenCV test loop):
# cap = cv2.VideoCapture(0)
# while True:
#     ret, frame = cap.read()
#     if ret:
#         frame = predict_emotion(frame)
#         cv2.imshow('Live Facial Emotion Detection', frame)
#     if cv2.waitKey(1) == ord('q'):
#         break
# cap.release()
# cv2.destroyAllWindows()
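# The Gradio interface below serves the same predict_emotion function in the browser.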
input_image = gr.Image(source="webcam", streaming=True, label="Your Face")
# video = gr.inputs.Video(source="webcam")
output_image = gr.Image(type="numpy", label="Detected Emotion")
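# With streaming=True and live=True, Gradio sends webcam frames to
# predict_emotion continuously and displays the returned annotated frame.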
iface = gr.Interface(
    fn=predict_emotion,
    inputs=input_image,
    outputs=output_image,
    # interpretation = "default",
    title="Mood Detectives",
    description="Real-Time Emotion Detection Using Facial Expressions:\nCan our model detect if you are angry, happy, sad, fearful, disgusted, surprised or neutral?",
    live=True
)
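# A queue lets many concurrent browser sessions stream frames without blocking.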
iface.queue(concurrency_count=1000)
iface.launch()