File size: 2,840 Bytes
df02b79
31d4b6e
3501c5c
 
31d4b6e
 
5c53740
3501c5c
 
 
990a101
3501c5c
 
990a101
3501c5c
 
 
 
 
 
31d4b6e
 
7f642aa
5900a6d
31d4b6e
1394c4f
df02b79
8604064
 
1d57e98
8604064
 
5900a6d
df02b79
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5900a6d
 
 
 
 
 
df02b79
3c55658
 
 
 
 
 
 
 
 
df02b79
3c55658
 
 
19aa63a
3c55658
df02b79
 
 
 
 
 
 
 
 
 
 
 
1e52d4f
df02b79
5900a6d
df02b79
 
 
 
 
 
 
0c6b33b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import heapq

import gradio as gr
import numpy as np
import pandas as pd

from keras.models import model_from_json
from keras.preprocessing import image

#import sys
#import os
#import cv2
#import matplotlib.pyplot as plt
#from PIL import Image
#import keras
#import tensorflow as tf
#from keras.models import Model
#from keras.optimizers import Adam
#from keras.applications.vgg16 import VGG16, preprocess_input
#from keras.applications.vgg19 import VGG19, preprocess_input
#from keras.preprocessing.image import ImageDataGenerator
#from keras.callbacks import ModelCheckpoint, EarlyStopping
#from keras.layers import Dense, Dropout, Flatten, MaxPooling2D, Conv2D
#from pathlib import Path
#from sklearn.metrics import accuracy_score
#from keras.applications.vgg16 import VGG16, preprocess_input
# Rebuild the network architecture from its serialized JSON description.
# A context manager guarantees the file handle is closed (the original
# opened the file and left the close() commented out).
with open("focusondriving.json", "r") as model_file:
    model_json2 = model_file.read()
loaded_model = model_from_json(model_json2)
# NOTE(review): load_weights("focusondriving.h5") is commented out below, so
# the model appears to run with freshly initialized weights — confirm whether
# the weight file should be loaded here.
#loaded_model = model_from_json("focusondriving.json")
#loaded_model.load_weights("focusondriving.h5")

# Human-readable label for each model output class code c0..c9.
_DISTRACTION_LABELS = [
    'hands on the wheel',
    'mobile in right hand',
    'talking on the phone with right hand',
    "mobile in left hand",
    'talking on the phone with left hand',
    'touching at the dash',
    'drinking',
    'reaching behind',
    'touching the head',
    'looking to the side',
]
class_dict = {f'c{i}': label for i, label in enumerate(_DISTRACTION_LABELS)}

def predict_image(pic):
    """Classify a driver photo into one of the 10 distraction categories.

    Parameters
    ----------
    pic : str
        Filesystem path to the input image (Gradio ``filepath`` input).

    Returns
    -------
    str
        Result line with the top class label and its probability, e.g.
        ``"resultado: drinking     probabilidad 87%"``.
    """
    # Load and batch the image the way the network expects (224x224 RGB).
    img = image.load_img(pic, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # NOTE(review): preprocess_input is commented out; if the model was
    # trained on VGG-preprocessed inputs, predictions will be off — confirm.
    preds = list(loaded_model.predict(x)[0])

    # Index of the highest-probability class. The original also computed a
    # runner-up but then crashed on an undefined `directory` variable before
    # ever using it; that dead code is removed here.
    top = heapq.nlargest(1, range(len(preds)), key=preds.__getitem__)[0]
    result1 = f'c{top}'

    score = int(round(preds[top], 2) * 100)
    return f"resultado: {class_dict.get(result1)}     probabilidad {score}%"
    
    
# Gradio UI: a single image-upload input feeding predict_image, text output.
# NOTE(review): the title/description mention Facial Expression Recognition,
# but the model classifies driver distraction — these strings look copied
# from a different project; confirm the intended text.
iface = gr.Interface(
    predict_image,
    [
        gr.inputs.Image(source="upload", type="filepath", label="Imagen"),
    ],
    "text",
    interpretation="default",
    # Fixed the stray repeated letters in the original title
    # ("Recognitionllll").
    title='FER - Facial Expression Recognition',
    description='Probablemente nos daremos cuenta de que muchas veces se miente cuando se tratan las emociones, ¿pero nuestra cara también miente? https://saturdays.ai/2022/03/16/detectando-emociones-mediante-imagenes-con-inteligencia-artificial/ ',
    theme='grass',
)


# launch() takes a lowercase `debug` keyword; the original passed `Debug=True`,
# which newer Gradio versions reject with a TypeError.
iface.launch(debug=True)