Spaces: Update app.py

app.py CHANGED
@@ -132,49 +132,69 @@
 # if __name__ == "__main__":
 #     demo.launch()

-import gradio as gr
-import numpy as np
-import cv2
-
-def transform_cv2(frame, transform):
-    if transform == "cartoon":
-        # prepare color
-        img_color = cv2.pyrDown(cv2.pyrDown(frame))
-        for _ in range(6):
-            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
-        img_color = cv2.pyrUp(cv2.pyrUp(img_color))
-
-        # prepare edges
-        img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
-        img_edges = cv2.adaptiveThreshold(
-            cv2.medianBlur(img_edges, 7),
-            255,
-            cv2.ADAPTIVE_THRESH_MEAN_C,
-            cv2.THRESH_BINARY,
-            9,
-            2,
-        )
-        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
-        # combine color and edges
-        img = cv2.bitwise_and(img_color, img_edges)
-        return img
-    elif transform == "edges":
-        # perform edge detection
-        img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
-        return img
-    else:
-        return np.flipud(frame)
-
-with gr.Blocks() as demo:
-    with gr.Row():
-        with gr.Column():
-            transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
-                                    value="flip", label="Transformation")
-            input_img = gr.Image(sources=["webcam"], type="numpy")
-        with gr.Column():
-            output_img = gr.Image(streaming=True)
-    dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],
-                           time_limit=30, stream_every=0.1, concurrency_limit=30)
-
-if __name__ == "__main__":
-    demo.launch()
+# import gradio as gr
+# import numpy as np
+# import cv2
+# from ultralytics import YOLO
+
+# model = YOLO('Model_IV.pt')
+
+# def transform_cv2(frame, transform):
+#     if transform == "cartoon":
+#         # prepare color
+#         img_color = cv2.pyrDown(cv2.pyrDown(frame))
+#         for _ in range(6):
+#             img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
+#         img_color = cv2.pyrUp(cv2.pyrUp(img_color))
+
+#         # prepare edges
+#         img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+#         img_edges = cv2.adaptiveThreshold(
+#             cv2.medianBlur(img_edges, 7),
+#             255,
+#             cv2.ADAPTIVE_THRESH_MEAN_C,
+#             cv2.THRESH_BINARY,
+#             9,
+#             2,
+#         )
+#         img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)
+#         # combine color and edges
+#         img = cv2.bitwise_and(img_color, img_edges)
+#         return img
+#     elif transform == "edges":
+#         # perform edge detection
+#         img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)
+#         return img
+#     else:
+#         return np.flipud(frame)
+
+# with gr.Blocks() as demo:
+#     with gr.Row():
+#         with gr.Column():
+#             transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
+#                                     value="flip", label="Transformation")
+#             input_img = gr.Image(sources=["webcam"], type="numpy")
+#         with gr.Column():
+#             output_img = gr.Image(streaming=True)
+#     dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],
+#                            time_limit=30, stream_every=0.1, concurrency_limit=30)
+
+# if __name__ == "__main__":
+#     demo.launch()
+
+###
+
+import gradio as gr
+import torch
+import cv2
+
+# Load the YOLOv8 model
+model = torch.hub.load('ultralytics/yolov8', 'Model_IV')
+
+def inference(img):
+    results = model(img)
+    annotated_img = results.render()[0]
+    return annotated_img
+
+iface = gr.Interface(fn=inference, inputs="webcam", outputs="image")
+iface.launch()
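Note: the new code at the bottom of the file likely won't run as committed. torch.hub serves Ultralytics models through the 'ultralytics/yolov5' repo (YOLOv8 is not published on torch.hub), and results.render() belongs to the YOLOv5 Detections API, not YOLOv8's Results objects. Below is a minimal sketch of the same single-shot app under the assumption suggested by the commented-out block: the custom checkpoint Model_IV.pt sits next to app.py and loads through the ultralytics package.

    import cv2
    import gradio as gr
    from ultralytics import YOLO

    # Assumed custom YOLOv8 checkpoint, as in the commented-out block above.
    model = YOLO('Model_IV.pt')

    def inference(img):
        # Gradio hands over an RGB ndarray; ultralytics treats raw ndarrays
        # as BGR, so convert before running detection.
        results = model(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
        # plot() draws the predicted boxes/labels onto a BGR copy of the
        # frame; convert back to RGB for display.
        return cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)

    # Gradio 4 (which the removed code targets) dropped the "webcam" input
    # shorthand, so use an explicit Image component instead.
    iface = gr.Interface(
        fn=inference,
        inputs=gr.Image(sources=["webcam"], type="numpy"),
        outputs="image",
    )

    if __name__ == "__main__":
        iface.launch()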
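To keep the live-preview behavior of the deleted demo, the same stream() wiring can be pointed at the new model. A sketch under the same assumptions (detect is a hypothetical helper; Model_IV.pt loaded via ultralytics; the timing parameters are copied verbatim from the removed code):

    import cv2
    import gradio as gr
    from ultralytics import YOLO

    model = YOLO('Model_IV.pt')  # assumed custom checkpoint, as above

    def detect(frame):
        # Annotate one webcam frame and return it as RGB for display.
        results = model(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        return cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)

    with gr.Blocks() as demo:
        input_img = gr.Image(sources=["webcam"], type="numpy")
        output_img = gr.Image(streaming=True)
        # Same event wiring as the removed demo: re-run on each new frame,
        # at most every 0.1 s, for up to 30 s per session.
        input_img.stream(detect, [input_img], [output_img],
                         time_limit=30, stream_every=0.1, concurrency_limit=30)

    if __name__ == "__main__":
        demo.launch()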