Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,129 +1,3 @@
-# old code
-# import gradio as gr
-# import torch
-
-# model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
-# Define the face detector function
-# def detect_faces(image):
-#     # Loading in yolov5s - you can switch to larger models such as yolov5m or yolov5l, or smaller such as yolov5n
-#     results = model(image)
-
-#     return results.render()[0]
-
-# # Create a Gradio interface
-# iface = gr.Interface(fn=detect_faces, inputs=gr.Image(source="webcam", tool =None), outputs="image")
-
-# # Launch the interface
-# iface.launch(debug=True)
-
-# demo = gr.TabbedInterface([img_demo, vid_demo], ["Image", "Video"])
-
-# if __name__ == "__main__":
-#     demo.launch()
-# from IPython.display import clear_output
-# import os, urllib.request
-# import subprocess
-# from roboflow import Roboflow
-# import json
-# from time import sleep
-# from PIL import Image, ImageDraw
-# import io
-# import base64
-# import requests
-# from os.path import exists
-# import sys, re, glob
-
-# model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
-# rf = Roboflow(api_key="affmrRA3zyr34kAQF3sJ")
-# project = rf.workspace().project("ecosmart-pxc0t")
-# dataset = project.version(4).model
-
-# def detect_video(video):
-#     HOME = os.path.expanduser("~")
-#     pathDoneCMD = f'{HOME}/doneCMD.sh'
-#     if not os.path.exists(f"{HOME}/.ipython/ttmg.py"):
-#         hCode = "https://raw.githubusercontent.com/yunooooo/gcct/master/res/ttmg.py"
-#         urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ttmg.py")
-
-#     from ttmg import (
-#         loadingAn,
-#         textAn,
-#     )
-
-#     os.chdir("/content/")
-#     os.makedirs("videos_to_infer", exist_ok=True)
-#     os.makedirs("inferred_videos", exist_ok=True)
-#     os.chdir("videos_to_infer")
-#     os.environ['inputFile'] = video.name
-#     command = ['ffmpeg', '-hide_banner', '-loglevel', 'error', '-i', input_file, '-vf', 'fps=2', output_pattern]
-#     subprocess.run(command)
-
-#     subprocess.run(['pip', 'install', 'roboflow'])
-#     install_roboflow()
-#     model = version.model
-#     print(model)
-
-#     file_path = "/content/videos_to_infer/"
-#     extention = ".png"
-#     globbed_files = sorted(glob.glob(file_path + '*' + extention))
-#     print(globbed_files)
-#     for image in globbed_files:
-#         # INFERENCE
-#         predictions = model.predict(image).json()['predictions']
-#         newly_rendered_image = Image.open(image)
-
-#         # RENDER
-#         # for each detection, create a crop and convert into CLIP encoding
-#         print(predictions)
-#         for prediction in predictions:
-#             # rip bounding box coordinates from current detection
-#             # note: infer returns center points of box as (x,y) and width, height
-#             # ----- but pillow crop requires the top left and bottom right points to crop
-#             x0 = prediction['x'] - prediction['width'] / 2
-#             x1 = prediction['x'] + prediction['width'] / 2
-#             y0 = prediction['y'] - prediction['height'] / 2
-#             y1 = prediction['y'] + prediction['height'] / 2
-#             box = (x0, y0, x1, y1)
-
-#             newly_rendered_image = draw_boxes(box, x0, y0, newly_rendered_image, prediction['class'])
-
-#         # WRITE
-#         save_with_bbox_renders(newly_rendered_image)
-
-#     # Run ffmpeg command
-#     subprocess.run(['ffmpeg', '-r', '8', '-s', '1920x1080', '-i', '/content/inferred_videos/YOUR_VIDEO_FILE_out%04d.png', '-vcodec', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', 'test.mp4'])
-# # Call the function to execute the commands
-# execute_commands()
-
-
-
-# def draw_boxes(box, x0, y0, img, class_name):
-#     bbox = ImageDraw.Draw(img)
-
-#     bbox.rectangle(box, outline =color_map[class_name], width=5)
-#     bbox.text((x0, y0), class_name, fill='black', anchor='mm')
-
-#     return img
-
-# def save_with_bbox_renders(img):
-#     file_name = os.path.basename(img.filename)
-#     img.save('/content/inferred_videos/' + file_name)
-
-
-
-# loadingAn(name="lds")
-# textAn("Installing Dependencies...", ty='twg')
-# os.system('pip install git+git://github.com/AWConant/jikanpy.git')
-# os.system('add-apt-repository -y ppa:jonathonf/ffmpeg-4')
-# os.system('apt-get update')
-# os.system('apt install mediainfo')
-# os.system('apt-get install ffmpeg')
-# clear_output()
-# print('Installation finished.')
-
-# Define the face detector function
-
-
 import gradio as gr
 import torch
 import cv2
@@ -221,8 +95,12 @@ vid_interface = gr.Interface(
     outputs="video",
     title="Video"
 )
+
+# Add examples
+examples1 = ['potatoleaf.jpg','potatoearlyblight.jpg','potatolate.jpg']
+
 # Create a list of interfaces
-interfaces = [img_interface, vid_interface]
+interfaces = [img_interface, examples1, vid_interface, examples2]
 
 # Create the tabbed interface
 tabbed_interface = gr.TabbedInterface(interfaces, ["Image", "Video"])
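
A likely cause of the Runtime error status above: gr.TabbedInterface expects a list of Interface objects plus one tab name per interface, but the new interfaces list mixes in the bare filename list examples1 and references examples2, which is never defined anywhere in this diff, while still passing only two tab names. The conventional wiring attaches examples to each Interface via its examples parameter. Below is a minimal sketch of that layout, not the committed code: the handler names detect_image/detect_video, the input/output component choices, and the sample_video.mp4 entry standing in for the undefined examples2 are all assumptions.

# Sketch only. Assumes detect_image/detect_video handlers are defined earlier
# in app.py and the example files exist in the Space repository.
import gradio as gr

examples1 = ['potatoleaf.jpg', 'potatoearlyblight.jpg', 'potatolate.jpg']
examples2 = ['sample_video.mp4']  # hypothetical; examples2 is undefined in this diff

img_interface = gr.Interface(
    fn=detect_image,          # assumed name of the image handler
    inputs=gr.Image(type="pil"),
    outputs="image",
    title="Image",
    examples=examples1,       # examples belong on the Interface, not in the tab list
)

vid_interface = gr.Interface(
    fn=detect_video,          # assumed name of the video handler
    inputs=gr.Video(),
    outputs="video",
    title="Video",
    examples=examples2,
)

# TabbedInterface then receives exactly one Interface per tab name.
tabbed_interface = gr.TabbedInterface(
    [img_interface, vid_interface], ["Image", "Video"]
)

With that layout, tabbed_interface.launch() serves both tabs and each tab shows its own clickable examples under the input components.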