Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import cv2
 import numpy as np
 import gradio as gr
+from PIL import Image
 
 
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
@@ -14,20 +15,23 @@ model.multi_label = False
 model.max_det = 1000
 
 
-
+def detect(img):
 
 
-results = model(img, size=640)
+    results = model(img, size=640)
 
-predictions = results.pred[0]
-boxes = predictions[:, :4] # x1, y1, x2, y2
-scores = predictions[:, 4]
-categories = predictions[:, 5]
-
+    predictions = results.pred[0]
+    boxes = predictions[:, :4] # x1, y1, x2, y2
+    scores = predictions[:, 4]
+    categories = predictions[:, 5]
 
-
+    new_image = npnp.squeeze(results.render())
+    print(new_image.shape)
+    return new_image
 
+
 
+img = gr.inputs.Image(shape=(192, 192))
 
-intf = gr.Interface(inputs=img, outputs=
-intf.launch(inline=False)
+#intf = gr.Interface(fn=detect, inputs=img, outputs='image')
+#intf.launch(inline=False)
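Putting the two hunks together, the updated app.py roughly amounts to the sketch below. This is a reconstruction, not the exact file: the unchanged lines between the hunks (the other model.* inference settings) are not visible in the diff, npnp.squeeze is assumed to be a typo for np.squeeze, and the gr.Interface / launch lines that the commit leaves commented out are re-enabled here so the detector is actually served. It also assumes the Gradio 2.x gr.inputs API that the commit uses.

import torch
import cv2              # imported in app.py; not used directly in this sketch
import numpy as np
import gradio as gr
from PIL import Image   # imported in app.py; not used directly in this sketch

# Load the small pretrained YOLOv5 checkpoint from the Ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

# Inference settings visible in the diff context; the unchanged lines between
# the two hunks (confidence/IoU thresholds etc.) are not shown in the commit
model.multi_label = False
model.max_det = 1000


def detect(img):
    # Run inference at 640 px; results is a YOLOv5 Detections object
    results = model(img, size=640)

    predictions = results.pred[0]   # (n, 6) tensor: x1, y1, x2, y2, conf, cls
    boxes = predictions[:, :4]      # x1, y1, x2, y2
    scores = predictions[:, 4]      # confidence scores
    categories = predictions[:, 5]  # class indices

    # render() draws the boxes onto the image(s) and returns a list of
    # annotated arrays; squeeze the single-image list down to one HxWx3 array
    new_image = np.squeeze(results.render())
    return new_image


# Gradio 2.x-style input component, as used in the commit
img = gr.inputs.Image(shape=(192, 192))

intf = gr.Interface(fn=detect, inputs=img, outputs='image')
intf.launch(inline=False)

Note that newer Gradio releases drop the gr.inputs namespace, so running this against a recent gradio pin would need the top-level gr.Image component in place of gr.inputs.Image.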