Upload 9 files
- angry1.png +0 -0
- angry2.jpg +0 -0
- app.py +56 -0
- emotions_vgg19.pkl +3 -0
- happy1.jpg +0 -0
- happy2.jpg +0 -0
- neutral1.jpg +0 -0
- neutral2.jpg +0 -0
- requirements.txt +1 -0
angry1.png
ADDED
angry2.jpg
ADDED
app.py
ADDED
@@ -0,0 +1,56 @@
+# Facial expression classifier
+import os
+from fastai.vision.all import *
+import gradio as gr
+
+# Emotion model
+learn_emotion = load_learner('emotions_vgg19.pkl')
+learn_emotion_labels = learn_emotion.dls.vocab
+
+
+# Predict
+def predict(img):
+    img = PILImage.create(img)
+    pred_emotion, pred_emotion_idx, probs_emotion = learn_emotion.predict(img)
+    predicted_emotion = learn_emotion_labels[pred_emotion_idx]
+    return predicted_emotion
+
+
+# Gradio
+title = "Facial Emotion Detector"
+
+description = gr.Markdown(
+    """Ever wondered what a person might be feeling looking at their picture?
+    Well, now you can! Try this fun app. Just upload a facial image in JPG or
+    PNG format, and see what they might have felt when the picture was taken.
+
+    **Tip**: Be sure to include only the face to get the best results. Check the
+    sample images below for inspiration!""").value
+
+article = gr.Markdown(
+    """**DISCLAIMER:** This model does not reveal the actual emotional state of a
+    person. Use and interpret the results at your own risk!
+
+    **PREMISE:** The idea is to determine the overall emotion of a person from a
+    picture. Input pictures are restricted to close-up facial images.
+
+    **DATA:** The FER2013 dataset consists of 48x48-pixel grayscale images of faces.
+    Each image is assigned one of seven emotions: Angry, Disgust, Fear, Happy, Sad,
+    Surprise, or Neutral.
+    """).value
+
+enable_queue = True
+
+examples = ['happy1.jpg', 'happy2.jpg', 'angry1.png', 'angry2.jpg', 'neutral1.jpg', 'neutral2.jpg']
+
+gr.Interface(fn=predict,
+             inputs=gr.Image(image_mode='L'),
+             outputs=gr.Label(label='Emotion'),
+             title=title,
+             examples=examples,
+             description=description,
+             article=article,
+             allow_flagging='never').launch(enable_queue=enable_queue)
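
Note: the predict function above computes the full probability tensor (probs_emotion) but discards it, returning only the top label. Since the output component is gr.Label, which can also render per-class confidences when handed a dict mapping labels to probabilities, a small variant would surface the whole distribution. This is only a sketch, not part of the commit; predict_with_probs is a hypothetical name, and it assumes the same learn_emotion and learn_emotion_labels objects defined in app.py:

# Sketch: return per-class confidences instead of just the top label
# (assumes learn_emotion and learn_emotion_labels from app.py above).
def predict_with_probs(img):
    img = PILImage.create(img)
    _, _, probs = learn_emotion.predict(img)
    # gr.Label renders a {label: confidence} dict as one bar per class
    return {label: float(probs[i]) for i, label in enumerate(learn_emotion_labels)}

Swapping fn=predict for fn=predict_with_probs in the gr.Interface call would be the only other change needed.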
emotions_vgg19.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:335570ccf5133bc1fc0d4cc9bb7835b72fd457a64c0cdefbbc863f1c24cc4454
+size 82965753
happy1.jpg
ADDED
happy2.jpg
ADDED
neutral1.jpg
ADDED
neutral2.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1 @@
+fastai