Hugging Face Spaces build status: Runtime error (the app fails at startup).
| import gradio as gr | |
| #import sys | |
| #import os | |
| import pandas as pd | |
| import numpy as np | |
| #import cv2 | |
| #import matplotlib.pyplot as plt | |
| #from PIL import Image | |
| #import keras | |
| #import tensorflow as tf | |
| #from keras.models import Model | |
| #from keras.optimizers import Adam | |
| #from keras.applications.vgg16 import VGG16, preprocess_input | |
| #from keras.applications.vgg19 import VGG19, preprocess_input | |
| #from keras.preprocessing.image import ImageDataGenerator | |
| #from keras.callbacks import ModelCheckpoint, EarlyStopping | |
| #from keras.layers import Dense, Dropout, Flatten, MaxPooling2D, Conv2D | |
| #from pathlib import Path | |
| #from sklearn.metrics import accuracy_score | |
| from keras.models import model_from_json | |
| #from keras.preprocessing import image | |
| #from keras.applications.vgg16 import VGG16, preprocess_input | |
| #import heapq | |
# Load the model ARCHITECTURE from its serialized Keras JSON description.
# Use a context manager so the file handle is closed even if read() raises.
with open("focusondriving.json", "r") as model_file:
    model_json2 = model_file.read()
loaded_model = model_from_json(model_json2)
# NOTE(review): the weights load below is disabled, so `loaded_model` holds an
# UNTRAINED network — any real prediction would be meaningless until
# "focusondriving.h5" is restored. Confirm whether this is intentional.
# loaded_model.load_weights("focusondriving.h5")
# Mapping from model output codes ("c0".."c9") to human-readable
# driver-behaviour labels, in class-index order.
_CLASS_LABELS = (
    "hands on the wheel",
    "mobile in right hand",
    "talking on the phone with right hand",
    "mobile in left hand",
    "talking on the phone with left hand",
    "touching at the dash",
    "drinking",
    "reaching behind",
    "touching the head",
    "looking to the side",
)
class_dict = {f"c{idx}": label for idx, label in enumerate(_CLASS_LABELS)}
def predict_image(pic):
    """Classify an uploaded driver image.

    Parameters
    ----------
    pic : str
        File path of the uploaded image (Gradio ``type="filepath"``).

    Returns
    -------
    str
        Currently a hard-coded placeholder. The real pipeline — load/resize
        the image, ``preprocess_input``, ``loaded_model.predict``, then map
        the top class through ``class_dict`` — was commented out, presumably
        because the model weights are never loaded.
    """
    # TODO(review): restore the preprocessing + predict + top-2 ranking code
    # (previously here, commented out) once "focusondriving.h5" is loaded;
    # until then every request gets this stub answer.
    return "pepe"
# Build and launch the web UI.
# FIX: `gr.inputs.Image(source=...)`, `interpretation="default"` and
# `theme='grass'` belong to the legacy Gradio 2.x API and raise at import time
# on current Gradio releases — the likely cause of the Space's "Runtime
# error". Use the top-level component API instead.
# NOTE(review): the title/description talk about facial-expression
# recognition, while `class_dict` labels driver behaviour — confirm the
# intended copy with the authors before changing user-facing text.
iface = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(type="filepath", label="Imagen"),
    outputs="text",
    title='FER - Facial Expression Recognition',
    description='Probablemente nos daremos cuenta de que muchas veces se miente cuando se tratan las emociones, ¿pero nuestra cara también miente? https://saturdays.ai/2022/03/16/detectando-emociones-mediante-imagenes-con-inteligencia-artificial/ ',
)
iface.launch()