File size: 2,865 Bytes
df02b79
31d4b6e
3501c5c
 
31d4b6e
 
5c53740
3501c5c
 
 
990a101
3501c5c
 
990a101
3501c5c
 
 
 
 
 
31d4b6e
 
7f642aa
5900a6d
6efb943
2b3c38f
df02b79
8604064
 
1d57e98
8604064
 
0549f6a
df02b79
 
2339e41
 
 
 
 
 
 
 
 
 
df02b79
 
 
5900a6d
 
 
6efb943
5900a6d
 
df02b79
3c55658
 
 
 
 
 
 
80b4637
 
df02b79
3c55658
 
 
19aa63a
3c55658
df02b79
 
 
 
 
 
 
 
 
 
 
 
1e52d4f
df02b79
5900a6d
df02b79
 
 
 
 
 
 
b5f6374
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import gradio as gr

#import sys
#import os
import pandas as pd
import numpy as np
#import cv2
#import matplotlib.pyplot as plt
#from PIL import Image
#import keras
#import tensorflow as tf
#from keras.models import Model
#from keras.optimizers import Adam
#from keras.applications.vgg16 import VGG16, preprocess_input
#from keras.applications.vgg19 import VGG19, preprocess_input
#from keras.preprocessing.image import ImageDataGenerator
#from keras.callbacks import ModelCheckpoint, EarlyStopping
#from keras.layers import Dense, Dropout, Flatten, MaxPooling2D, Conv2D
#from pathlib import Path
#from sklearn.metrics import accuracy_score


from keras.models import model_from_json
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16, preprocess_input
import heapq

# Load the trained driver-distraction model: the architecture comes from the
# JSON export and the weights from the matching HDF5 checkpoint.
# Using a context manager fixes the leaked file handle (the original opened
# the file and left `file.close()` commented out).
with open("focusondriving.json", "r") as model_file:
    model_json2 = model_file.read()
loaded_model = model_from_json(model_json2)
loaded_model.load_weights("focusondriving.h5")

# Human-readable (Spanish) labels for the ten behaviour classes c0..c9
# emitted by the model, in index order.
_CLASS_LABELS = (
    'Manos en el volante',
    'Móvil en la mano derecha',
    'Hablando por el teléfono con la mano derecha',
    "Móvil en la mano izquierda",
    'Hablando con el teléfono con la mano izquierda',
    'Tocando el salpicadero',
    'Bebiendo',
    'Buscando detrás',
    'Tocándose la cabeza',
    'Mirando al lado',
)
class_dict = {f'c{i}': label for i, label in enumerate(_CLASS_LABELS)}

def predict_image(pic):
    """Classify a driver image and return a human-readable result string.

    Args:
        pic: Filesystem path to the input image (Gradio supplies a filepath
            because the input component uses type="filepath").

    Returns:
        A Spanish string with the top predicted class label and its
        probability as an integer percentage.
    """
    # Resize to the 224x224 RGB input the VGG16 preprocessing expects.
    img = image.load_img(pic, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add batch dimension -> (1, 224, 224, 3)
    x = preprocess_input(x)
    preds = list(loaded_model.predict(x)[0])

    # Only the argmax is reported; the original also computed the runner-up
    # (result2/result2_) and a txt3 = "pepe" placeholder, but none of that
    # reached the return value — dead code removed.
    best = max(range(len(preds)), key=preds.__getitem__)
    # Keep the original rounding order (round to 2 dp, then scale) so the
    # displayed percentage matches previous behaviour exactly.
    score = int(round(preds[best], 2) * 100)
    return f"resultado: {class_dict.get(f'c{best}')}     probabilidad {score}%"
    
    
# Build the Gradio UI: one uploaded image in, one plain-text verdict out.
# NOTE(review): the title/description below look copied from a separate
# facial-expression-recognition demo, while this app classifies driver
# distraction (see class_dict / "focusondriving" model files) — confirm the
# intended wording with the authors. Only the obvious "llll" typo is fixed.
iface = gr.Interface(
    predict_image,
    [
        gr.inputs.Image(source="upload", type="filepath", label="Imagen"),
    ],
    "text",
    interpretation="default",
    title='FER - Facial Expression Recognition',
    description='Probablemente nos daremos cuenta de que muchas veces se miente cuando se tratan las emociones, ¿pero nuestra cara también miente? https://saturdays.ai/2022/03/16/detectando-emociones-mediante-imagenes-con-inteligencia-artificial/ ',
    theme='grass',
)

iface.launch()