# -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 18:31:26 2022

@author: gabri
"""
import numpy as np
import tensorflow as tf
import gradio as gr
from huggingface_hub import from_pretrained_keras
import cv2
import requests
from PIL import Image
import matplotlib.cm as cm
# import matplotlib.pyplot as plt
# URLs of the fine-tuned weights for each backbone.
models_links = {
    'xception': r'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%205/pesos.h5',
    'resnet': r'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%209/pesos.h5',
    'efficientnet': 'https://huggingface.co/gabri14el/grapevine_classification/resolve/main/experimentos/classificacao/Experimento%2010/pesos.h5'}

# Local paths of already-downloaded weight files, keyed by backbone name.
model_weights = {}

# Last convolutional layer of each backbone, used to build the Grad-CAM model.
model_last_convolutional_layer = {
    'xception': 'block14_sepconv2_act',
    'resnet': 'conv5_block3_3_conv',
    'efficientnet': 'top_conv'}

classes = ['Códega', 'Moscatel Galego', 'Rabigato', 'Tinta Roriz', 'Tinto Cao', 'Touriga Nacional']
# functions for inference
target_size_dimension = 300
def define_model(model):
    """Build the chosen ImageNet backbone, attach the classification head and load the fine-tuned weights."""
    weights = get_weights(model)
    if model == 'efficientnet':
        preprocessing_function = tf.keras.applications.efficientnet.preprocess_input
        model = tf.keras.applications.EfficientNetB3(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg'
        )
    elif model == 'resnet':
        preprocessing_function = tf.keras.applications.resnet_v2.preprocess_input
        model = tf.keras.applications.resnet_v2.ResNet101V2(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg'
        )
    else:
        preprocessing_function = tf.keras.applications.xception.preprocess_input
        model = tf.keras.applications.Xception(
            include_top=False,
            input_shape=(target_size_dimension, target_size_dimension, 3),
            weights='imagenet',
            pooling='avg'
        )

    # Classification head: two dense blocks followed by a 6-class softmax.
    x = tf.keras.layers.Dense(512, activation='relu')(model.output)
    x = tf.keras.layers.Dropout(0.25)(x)
    x = tf.keras.layers.Dense(512, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.25)(x)
    output = tf.keras.layers.Dense(6, activation='softmax')(x)

    nmodel = tf.keras.models.Model(model.input, output)
    nmodel.load_weights(weights)
    return preprocessing_function, nmodel
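
# Illustrative usage sketch (kept commented out so nothing runs at import time):
# build one of the classifiers and score a random placeholder batch; the head
# always outputs 6 class probabilities.
# preprocess, clf = define_model('xception')
# dummy = preprocess(np.random.uniform(0, 255, size=(1, target_size_dimension, target_size_dimension, 3)))
# print(clf.predict(dummy).shape)  # (1, 6)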

def get_weights(model):
    """Download the fine-tuned weights for `model` once and cache the local file path."""
    if model not in model_weights:
        r = requests.get(models_links[model], allow_redirects=True)
        with open(model + '.h5', 'wb') as f:
            f.write(r.content)
        model_weights[model] = model + '.h5'
    return model_weights[model]
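
# Illustrative caching behaviour (commented out): the first call downloads the
# weight file next to the script, later calls for the same backbone reuse it.
# get_weights('resnet')   # downloads and stores 'resnet.h5'
# get_weights('resnet')   # returns the cached path, no new download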

def get_img_array(img_path, size, expand=True):
    # `img` is a PIL image resized to `size`
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=size)
    # `array` is a float32 NumPy array of shape (size[0], size[1], 3)
    array = tf.keras.preprocessing.image.img_to_array(img)
    # Optionally add a leading dimension to turn the array into a batch of one image
    if expand:
        array = np.expand_dims(array, axis=0)
    return array

def make_gradcam_heatmap(img_array, grad_model, last_conv_layer_name, pred_index=None):
    # `grad_model` maps the input image to the activations of the last conv layer
    # and to the output predictions; it is built in `infer` below.
    # Compute the gradient of the top predicted class (or of `pred_index`) for the
    # input image with respect to the activations of the last conv layer.
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]

    # Gradient of the output neuron (top predicted or chosen) with regard to
    # the output feature map of the last conv layer.
    grads = tape.gradient(class_channel, last_conv_layer_output)

    # Vector where each entry is the mean intensity of the gradient over a
    # specific feature-map channel.
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    # Multiply each channel in the feature-map array by "how important this
    # channel is" with regard to the predicted class, then sum all channels
    # to obtain the class-activation heatmap.
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)

    # For visualization purposes, normalize the heatmap between 0 and 1.
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
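
# Illustrative sanity check (commented out; assumes `img_processed` and `grad_model`
# were built as in `infer` below): the returned heatmap is a small 2-D array with
# values in [0, 1]; it is only upsampled to the input resolution in save_and_display_gradcam.
# hm = make_gradcam_heatmap(img_processed, grad_model, 'top_conv')
# assert hm.ndim == 2 and hm.min() >= 0.0 and hm.max() <= 1.0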

def save_and_display_gradcam(img, heatmap, cam_path="cam.jpg", alpha=0.4):
    # Rescale the heatmap to the 0-255 range.
    heatmap = np.uint8(255 * heatmap)

    # Binary mask of the activated regions, resized to the input image size.
    im = Image.fromarray(heatmap)
    im = im.resize((img.shape[1], img.shape[0]))
    im = np.asarray(im)
    im = np.where(im > 0, 1, im)

    # Use the jet colormap to colorize the heatmap.
    jet = cm.get_cmap("jet")
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    # Create an RGB image from the colorized heatmap and resize it to the input image.
    jet_heatmap = tf.keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = tf.keras.preprocessing.image.img_to_array(jet_heatmap)

    # Superimpose the heatmap on the original image.
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = tf.keras.preprocessing.image.array_to_img(superimposed_img)

    # Optionally save the overlay to disk:
    # superimposed_img.save(cam_path)
    return superimposed_img, im

def infer(model_name, input_image):
    preprocess, model = define_model(model_name)

    # Preprocess the input and compute the per-class probabilities.
    img_processed = preprocess(np.expand_dims(input_image, axis=0))
    predictions = np.squeeze(model.predict(img_processed))
    result = {classes[i]: float(predictions[i]) for i in range(len(classes))}

    # Drop the softmax from the last layer and build a model that exposes the last
    # convolutional feature map alongside the predictions, as required by Grad-CAM.
    model.layers[-1].activation = None
    grad_model = tf.keras.models.Model(
        [model.inputs],
        [model.get_layer(model_last_convolutional_layer[model_name]).output, model.output])

    heatmap = make_gradcam_heatmap(img_processed, grad_model, model_last_convolutional_layer[model_name])
    heat, mask = save_and_display_gradcam(input_image, heatmap)
    return result, heat

# Gradio interface: a radio button to choose the backbone and an image input;
# the outputs are the class probabilities and the Grad-CAM overlay.
css = ".output-image, .input-image, .image-preview {height: 300px !important}"
inputs = [gr.Radio(["resnet", "efficientnet", "xception"], label='Choose a model'),
          gr.inputs.Image(shape=(target_size_dimension, target_size_dimension), label='Select an image')]
output = [gr.outputs.Label(label="Result"), gr.outputs.Image(type="numpy", label="Heatmap (Grad-CAM)")]

# It's good practice to pass examples, a description and a title to guide users.
examples = [["./content/examples/Frog.jpg"], ["./content/examples/Truck.jpg"]]  # defined but not passed to the Interface below
title = "Grapevine image classification"
description = "Upload an image to classify it. The allowed classes are: Códega, Moscatel Galego, Rabigato, Tinta Roriz, Tinto Cao, Touriga Nacional <p><b>Space author: Gabriel Carneiro</b> <br><b> [email protected] </b> </p>"

gr_interface = gr.Interface(infer, inputs, output, allow_flagging=False, analytics_enabled=False,
                            css=css, title=title, description=description).launch(enable_queue=True, debug=False)
# gr_interface.launch()
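
# Local smoke test (illustrative sketch, kept commented out; the image path is a
# placeholder): load an RGB photo from disk, resize it to the expected input size
# and call infer() directly, bypassing the Gradio UI.
# def local_test(image_path='path/to/leaf.jpg', model_name='xception'):
#     img = Image.open(image_path).convert('RGB')
#     img = img.resize((target_size_dimension, target_size_dimension))
#     result, overlay = infer(model_name, np.asarray(img))
#     print(result)                         # per-class probabilities
#     overlay.save('gradcam_overlay.jpg')   # Grad-CAM heatmap blended with the input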