import time

import cv2
import gradio as gr
import numpy as np
import simplejson
import tensorflow as tf
from matplotlib import pyplot as plt
from numpy import asarray
from PIL import Image

import label_map_util
import visualization_utils as vis_util

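# Tag-deciphering demo: run a frozen TensorFlow object-detection graph over an
# input image, draw the detected character boxes, and read the detections off
# top to bottom to reconstruct the vertical tag string.
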
def prediction(image_path):
    total_time_start = time.time()

    def loadImageIntoNumpyArray(image):
        # Convert a PIL image to an (H, W, 3) uint8 array, dropping any alpha channel.
        (im_width, im_height) = image.size
        if image.getdata().mode == "RGBA":
            image = image.convert('RGB')
        return asarray(image).reshape((im_height, im_width, 3)).astype(np.uint8)
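
    # main() loads the label map, runs the frozen detection graph over the image,
    # and returns the detected objects plus a side-by-side "original vs annotated"
    # matplotlib figure.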
    def main(image_path, model_path, model_PATH_TO_CKPT, path_to_labels):
        image = Image.open(image_path)
        image_np = loadImageIntoNumpyArray(image)
        image_np_expanded = np.expand_dims(image_np, axis=0)

        # Build the category index used to map class ids to display names.
        label_map = label_map_util.load_labelmap(path_to_labels)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=100, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)

        # Load the frozen detection graph and open a session on it.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.compat.v1.GraphDef()
            with tf.compat.v2.io.gfile.GFile(model_PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            sess = tf.compat.v1.Session(graph=detection_graph)
        # Input tensor is the image.
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Output tensors are the detection boxes, scores, and classes.
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        # Number of objects detected.
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
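        # boxes, scores, and classes come back with a leading batch dimension of 1,
        # and box coordinates are normalized to the [0, 1] range.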

        # Draw the detections onto image_np in place.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.1)

        objects = []
        threshold = 0.5
        boxes = boxes[0]
        for index, value in enumerate(classes[0]):
            object_dict = {}
            if scores[0, index] > threshold:
                object_dict["class"] = (category_index.get(value)).get('name')
                object_dict["score"] = round(scores[0, index] * 100, 2)
                box = tuple(boxes[index].tolist())
                ymin, xmin, ymax, xmax = box
                # Scale the normalized box onto a fixed 360x360 canvas.
                im_width, im_height = 360, 360
                left, right, top, bottom = (xmin * im_width, xmax * im_width,
                                            ymin * im_height, ymax * im_height)
                object_dict["box"] = (int(left), int(right), int(top), int(bottom))
                objects.append(object_dict)

        # Show the original image and the annotated image side by side.
        image_original = Image.open(image_path)
        image_np_original = loadImageIntoNumpyArray(image_original)
        fig, ax = plt.subplots(1, 2)
        fig.suptitle('Tag Deciphering')
        ax[0].imshow(image_np_original, aspect='auto')
        ax[1].imshow(image_np, aspect='auto')
        return objects, fig

    model_path = "//inference"
    model_PATH_TO_CKPT = "frozen_inference_graph.pb"
    path_to_labels = "tf_label_map.pbtxt"
    result, fig = main(image_path, model_path, model_PATH_TO_CKPT, path_to_labels)
| # print("result-",result) | |
| # list_to_be_sorted= [{'class': 'Y', 'score': 99.97, 'box': (157, 191, 269, 288)}, {'class': '6', 'score': 99.93, 'box': (158, 191, 247, 267)}, {'class': '9', 'score': 99.88, 'box': (156, 190, 179, 196)}, {'class': '4', 'score': 99.8, 'box': (156, 189, 198, 219)}, {'class': '1', 'score': 99.65, 'box': (157, 189, 222, 244)}, {'class': 'F', 'score': 63.4, 'box': (155, 185, 157, 175)}] | |
| newlist = sorted(result, key=lambda k: k['box'][3],reverse=False) | |
| text ='' | |
| for each in newlist: | |
| if(each['score']>65): | |
| text += each['class'] | |
| # print("text:",text) | |
    if text != "":
        text = text.replace("yellowTag", "")
        result = text
    else:
        result = "No Vertical Tag Detected"

    response = {"predictions": [result]}
    total_time_end = time.time()
    print("total time:", round(total_time_end - total_time_start, 2))
    return simplejson.dumps(response), fig
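
# Gradio UI: a file-path image input, with the JSON prediction string and the
# side-by-side matplotlib figure as outputs.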
inputs = gr.inputs.Image(type='filepath')
EXAMPLES = ["img1.jpg", "img2.jpg", "img3.jpg", "img4.jpg"]
DESCRIPTION = """An image is occluded if its subject is blocked by another object.
For example, if an electric pole is covered by bushes, the image is occluded since the pole is not clearly visible.
MobileNet is used to train a model on occluded and non-occluded images so that it can classify them correctly.
Occlusion detection can be used to filter out unclear images and take safety measures."""

demo_app = gr.Interface(
    fn=prediction,
    inputs=inputs,
    # prediction() returns a JSON string and a matplotlib figure, so pair a text
    # output with a plot output.
    outputs=["text", "plot"],
    title="Tag Deciphering",
    description=DESCRIPTION,
    examples=EXAMPLES,
    theme='huggingface',
)
demo_app.launch()