Upload 14 files

- .gitattributes +3 -0
- .gitignore +7 -0
- app.py +326 -0
- best.pt +3 -0
- bus.jpg +0 -0
- image_0.jpg +0 -0
- image_1.jpg +0 -0
- image_ladder.png +3 -0
- image_tyre.png +3 -0
- render.py +63 -0
- requirements.txt +476 -0
- video.mp4 +3 -0
- yolov8n.pt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+image_ladder.png filter=lfs diff=lfs merge=lfs -text
+image_tyre.png filter=lfs diff=lfs merge=lfs -text
+video.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,7 @@
+flagged/
+*.pt
+*.png
+*.jpg
+*.mp4
+*.mkv
+gradio_cached_examples/
app.py
ADDED
@@ -0,0 +1,326 @@
+from PIL import Image
+import io
+import pandas as pd
+import numpy as np
+
+import gradio as gr
+import cv2
+import requests
+import os
+from ultralytics import YOLO
+
+from ultralytics.utils.plotting import Annotator, colors
+from render import custom_render_result
+
+file_urls = [
+    'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1',
+    'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1',
+    'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
+]
+
+def download_file(url, save_name):
+    if not os.path.exists(save_name):
+        file = requests.get(url)
+        open(save_name, 'wb').write(file.content)
+
+for i, url in enumerate(file_urls):
+    if 'mp4' in file_urls[i]:
+        download_file(
+            file_urls[i],
+            "video.mp4"
+        )
+    else:
+        download_file(
+            file_urls[i],
+            f"image_{i}.jpg"
+        )
+
+def get_image_from_bytes(binary_image: bytes) -> Image.Image:
+    """Convert image from bytes to PIL RGB format
+
+    **Args:**
+    - **binary_image (bytes):** The binary representation of the image
+
+    **Returns:**
+    - **PIL.Image:** The image in PIL RGB format
+    """
+    input_image = Image.open(io.BytesIO(binary_image)).convert("RGB")
+    return input_image
+
+
+def get_bytes_from_image(image: Image.Image) -> bytes:
+    """
+    Convert PIL image to bytes
+
+    Args:
+        image (Image): A PIL image instance
+
+    Returns:
+        bytes: BytesIO object that contains the image in JPEG format with quality 85
+    """
+    return_image = io.BytesIO()
+    image.save(return_image, format='JPEG', quality=85)  # save the image in JPEG format with quality 85
+    return_image.seek(0)  # set the pointer to the beginning of the file
+    return return_image
+
+
+def transform_predict_to_df(results: list, labels_dict: dict) -> pd.DataFrame:
+    """
+    Transform a prediction from yolov8 (torch.Tensor) to a pandas DataFrame.
+
+    Args:
+        results (list): A list containing the predict output from yolov8 in the form of a torch.Tensor.
+        labels_dict (dict): A dictionary containing the label names, where the keys are the class ids and the values are the label names.
+
+    Returns:
+        predict_bbox (pd.DataFrame): A DataFrame containing the bounding box coordinates, confidence scores and class labels.
+    """
+    # Transform the Tensor to numpy array
+    predict_bbox = pd.DataFrame(results[0].to("cpu").numpy().boxes.xyxy, columns=['xmin', 'ymin', 'xmax', 'ymax'])
+    # Add the confidence of the prediction to the DataFrame
+    predict_bbox['confidence'] = results[0].to("cpu").numpy().boxes.conf
+    # Add the class of the prediction to the DataFrame
+    predict_bbox['class'] = (results[0].to("cpu").numpy().boxes.cls).astype(int)
+    # Replace the class number with the class name from the labels_dict
+    predict_bbox['name'] = predict_bbox["class"].replace(labels_dict)
+    return predict_bbox
+
+
+def get_model_predict(model: YOLO, input_image: Image.Image, save: bool = False, image_size: int = 1248, conf: float = 0.5,
+                      augment: bool = False) -> pd.DataFrame:
+    """
+    Get the predictions of a model on an input image.
+
+    Args:
+        model (YOLO): The trained YOLO model.
+        input_image (Image): The image on which the model will make predictions.
+        save (bool, optional): Whether to save the image with the predictions. Defaults to False.
+        image_size (int, optional): The size of the image the model will receive. Defaults to 1248.
+        conf (float, optional): The confidence threshold for the predictions. Defaults to 0.5.
+        augment (bool, optional): Whether to apply data augmentation on the input image. Defaults to False.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing the predictions.
+    """
+    # Make predictions
+    predictions = model.predict(
+        imgsz=image_size,
+        source=input_image,
+        conf=conf,
+        save=save,
+        augment=augment,
+        flipud=0.0,
+        fliplr=0.0,
+        mosaic=0.0,
+    )
+
+    # Transform predictions to pandas dataframe
+    predictions = transform_predict_to_df(predictions, model.model.names)
+    return predictions
+
+
+def get_model_segment(model: YOLO, input_image: Image.Image, save: bool = False, image_size: int = 1248, conf: float = 0.25,
+                      augment: bool = False) -> pd.DataFrame:
+    """
+    Get the predictions of a model on an input image.
+
+    Args:
+        model (YOLO): The trained YOLO model.
+        input_image (Image): The image on which the model will make predictions.
+        save (bool, optional): Whether to save the image with the predictions. Defaults to False.
+        image_size (int, optional): The size of the image the model will receive. Defaults to 1248.
+        conf (float, optional): The confidence threshold for the predictions. Defaults to 0.25.
+        augment (bool, optional): Whether to apply data augmentation on the input image. Defaults to False.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing the predictions.
+    """
+    # Make predictions
+    predictions = model.predict(
+        imgsz=image_size,
+        source=input_image,
+        conf=conf,
+        save=save,
+        augment=augment,
+        flipud=0.0,
+        fliplr=0.0,
+        mosaic=0.0,
+    )
+
+    # Transform predictions to pandas dataframe
+    predictions = transform_predict_to_df(predictions, model.model.names)
+    return predictions
+
+
+################################# BBOX Func #####################################
+
+def add_bboxs_on_img(image: Image.Image, predict: pd.DataFrame) -> Image.Image:
+    """
+    Add bounding boxes on the image
+
+    Args:
+        image (Image): input image
+        predict (pd.DataFrame): predictions from the model
+
+    Returns:
+        Image: image with bboxes
+    """
+    # Create an annotator object
+    annotator = Annotator(np.array(image))
+
+    # sort predict by xmin value
+    predict = predict.sort_values(by=['xmin'], ascending=True)
+
+    # iterate over the rows of predict dataframe
+    for i, row in predict.iterrows():
+        # create the text to be displayed on image
+        text = f"{row['name']}: {int(row['confidence'] * 100)}%"
+        # get the bounding box coordinates
+        bbox = [row['xmin'], row['ymin'], row['xmax'], row['ymax']]
+        # add the bounding box and text on the image
+        annotator.box_label(bbox, text, color=colors(row['class'], True))
+    # convert the annotated image to PIL image
+    return Image.fromarray(annotator.result())
+
+
+################################# Models #####################################
+
+
+def detect_sample_model(input_image: Image.Image) -> pd.DataFrame:
+    """
+    Predict from sample_model.
+    Based on YOLOv8.
+
+    Args:
+        input_image (Image): The input image.
+
+    Returns:
+        pd.DataFrame: DataFrame containing the object locations.
+    """
+    predict = get_model_predict(
+        model=model_sample_detect,
+        input_image=input_image,
+        save=False,
+        image_size=640,
+        augment=False,
+        conf=0.2,
+    )
+    return predict
+
+def yoloV8_func(image: gr.Image = None,
+                image_size: int = 640,
+                conf_threshold: float = 0.4,
+                iou_threshold: float = 0.5,
+                model_name: str = 'YOLOv8-medium'):
+    """This function performs YOLOv8 object detection on the given image.
+
+    Args:
+        image (gr.Image, optional): Input image to detect objects on. Defaults to None.
+        image_size (int, optional): Desired image size for the model. Defaults to 640.
+        conf_threshold (float, optional): Confidence threshold for object detection. Defaults to 0.4.
+        iou_threshold (float, optional): Intersection over Union threshold for object detection. Defaults to 0.5.
+    """
+    # Load the YOLOv8 model from the 'best.pt' checkpoint
+    # model_path = "best.pt"
+    # model = torch.hub.load('ultralytics/yolov8', 'custom', path='/content/best.pt', force_reload=True, trust_repo=True)
+
+    # Perform object detection on the input image using the YOLOv8 model
+    results = model.predict(image,
+                            conf=conf_threshold,
+                            iou=iou_threshold,
+                            imgsz=image_size)
+
+    # The detected objects' information (class, coordinates, and probability)
+    box = results[0].boxes
+    # print("Object type:", box.cls)
+    # print("Coordinates:", box.xyxy)
+    # print("Probability:", box.conf)
+
+    # Render the output image with bounding boxes around detected objects
+    render = custom_render_result(model=model, image=image, result=results[0])
+    return render
+
+model = YOLO('best.pt')
+path = [['image_tyre.png'], ['image_ladder.png']]
+video_path = [['video.mp4']]
+
+outputs_image = gr.components.Image(label="Output Image")
+
+inputs_image = [
+    gr.components.Image(label="Input Image"),
+    gr.Slider(minimum=320, maximum=1280, step=32, label="Image Size", value=640),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="Confidence Threshold", value=0.4, info="Usual value is 0.5"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label="IOU Threshold", value=0.5, info="Usual value greater than 0.2"),
+    gr.components.Dropdown(["YOLOv8-nano", "YOLOv8-small", "YOLOv8-medium", "YOLOv8-large", "YOLOv8-xlarge"], value="YOLOv8-medium", label="YOLOv8 Model")
+]
+
+interface_image = gr.Interface(
+    fn=yoloV8_func,
+    inputs=inputs_image,
+    outputs=[outputs_image],
+    title="NonConforming Detector",
+    examples=path,
+    cache_examples=False,
+)
+
+def show_preds_video(video_path):
+    cap = cv2.VideoCapture(video_path)
+
+    conf_threshold = 0.4
+    iou_threshold = 0.5
+    image_size = 640
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if ret:
+            frame_copy = frame.copy()
+
+            results = model.predict(frame,
+                                    conf=conf_threshold,
+                                    iou=iou_threshold,
+                                    imgsz=image_size)
+
+            # The detected objects' information (class, coordinates, and probability)
+            box = results[0].boxes
+            # print("Object type:", box.cls)
+            # print("Coordinates:", box.xyxy)
+            # print("Probability:", box.conf)
+
+            # Render the output frame with bounding boxes around detected objects
+            render = custom_render_result(model=model, image=frame, result=results[0])
+            yield render
+            """
+            outputs = model.predict(source=frame)
+            results = outputs[0].cpu().numpy()
+            for i, det in enumerate(results.boxes.xyxy):
+                cv2.rectangle(
+                    frame_copy,
+                    (int(det[0]), int(det[1])),
+                    (int(det[2]), int(det[3])),
+                    color=(0, 0, 255),
+                    thickness=2,
+                    lineType=cv2.LINE_AA
+                )
+            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+            """
+        else:
+            break  # stop once the video is exhausted
+
+inputs_video = [
+    gr.components.Video(label="Input Video"),
+]
+outputs_video = [
+    gr.components.Image(label="Output Image"),
+]
+interface_video = gr.Interface(
+    fn=show_preds_video,
+    inputs=inputs_video,
+    outputs=outputs_video,
+    title="NonConforming Video Detector",
+    examples=video_path,
+    cache_examples=False,
+)
+
+gr.TabbedInterface(
+    [interface_image, interface_video],
+    tab_names=['Image inference', 'Video inference']
+).queue().launch()
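For reference, the detect-and-render path that app.py wires into Gradio can be exercised standalone. A minimal sketch, assuming the pinned dependencies are installed and that best.pt, render.py, and image_tyre.png are in the working directory (the output filename annotated_tyre.png is illustrative):

    # Standalone run of the same inference path used by yoloV8_func
    from ultralytics import YOLO
    from render import custom_render_result

    model = YOLO('best.pt')                    # same checkpoint the Space loads
    results = model.predict('image_tyre.png',  # same arguments yoloV8_func passes
                            conf=0.4, iou=0.5, imgsz=640)
    annotated = custom_render_result(model=model, image='image_tyre.png',
                                     result=results[0])
    annotated.save('annotated_tyre.png')       # illustrative output name
    print(results[0].boxes)                    # class ids, xyxy coords, confidences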
best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:303de6d2dbe1848be8fcc5d40d06ed44cd50d565785f4d96fb9292e48f90e3f4
+size 52010006
bus.jpg
ADDED

image_0.jpg
ADDED

image_1.jpg
ADDED

image_ladder.png
ADDED
(stored with Git LFS)

image_tyre.png
ADDED
(stored with Git LFS)
render.py
ADDED
@@ -0,0 +1,63 @@
+import cv2
+import numpy as np
+from sahi.utils.cv import read_image_as_pil, get_bool_mask_from_coco_segmentation
+from sahi.prediction import ObjectPrediction, PredictionScore, visualize_object_predictions
+from PIL import Image
+
+def custom_render_result(model, image, result, rect_th=2, text_th=2):
+    if model.overrides["task"] not in ["detect", "segment"]:
+        raise ValueError(
+            f"Model task must be either 'detect' or 'segment'. Got {model.overrides['task']}"
+        )
+
+    image = read_image_as_pil(image)
+    np_image = np.ascontiguousarray(image)
+
+    names = model.model.names
+
+    masks = result.masks
+    boxes = result.boxes
+
+    object_predictions = []
+    if boxes is not None:
+        det_ind = 0
+        for xyxy, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
+            if masks:
+                img_height = np_image.shape[0]
+                img_width = np_image.shape[1]
+                segments = masks.segments
+                segments = segments[det_ind]  # segments: np.array([[x1, y1], [x2, y2]])
+                # convert segments into full shape
+                segments[:, 0] = segments[:, 0] * img_width
+                segments[:, 1] = segments[:, 1] * img_height
+                segmentation = [segments.ravel().tolist()]
+
+                bool_mask = get_bool_mask_from_coco_segmentation(
+                    segmentation, width=img_width, height=img_height
+                )
+                if sum(sum(bool_mask == 1)) <= 2:
+                    continue
+                object_prediction = ObjectPrediction.from_coco_segmentation(
+                    segmentation=segmentation,
+                    category_name=names[int(cls)],
+                    category_id=int(cls),
+                    full_shape=[img_height, img_width],
+                )
+                object_prediction.score = PredictionScore(value=conf)
+            else:
+                object_prediction = ObjectPrediction(
+                    bbox=xyxy.tolist(),
+                    category_name=names[int(cls)],
+                    category_id=int(cls),
+                    score=conf,
+                )
+            object_predictions.append(object_prediction)
+            det_ind += 1
+
+    result = visualize_object_predictions(
+        image=np_image,
+        object_prediction_list=object_predictions,
+        rect_th=rect_th,
+        text_th=text_th,
+    )
+
+    return Image.fromarray(result["image"])
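For the detection-only case, custom_render_result boils down to wrapping each YOLO box in a SAHI ObjectPrediction and letting SAHI draw the result. A minimal sketch of that plumbing on hand-made boxes (all coordinates, category names, and scores below are invented for illustration):

    import numpy as np
    from PIL import Image
    from sahi.prediction import ObjectPrediction, visualize_object_predictions

    # Blank canvas plus two synthetic detections
    np_image = np.zeros((480, 640, 3), dtype=np.uint8)
    preds = [
        ObjectPrediction(bbox=[40, 60, 200, 220], category_id=0,
                         category_name='tyre', score=0.91),
        ObjectPrediction(bbox=[300, 100, 560, 400], category_id=1,
                         category_name='ladder', score=0.78),
    ]

    # Same call render.py makes; returns a dict whose "image" key holds the array
    drawn = visualize_object_predictions(image=np_image,
                                         object_prediction_list=preds,
                                         rect_th=2, text_th=2)
    Image.fromarray(drawn["image"]).save('synthetic_boxes.png')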
requirements.txt
ADDED
@@ -0,0 +1,476 @@
+# Ultralytics requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+hydra-core>=1.2.0
+matplotlib>=3.2.2
+numpy>=1.18.5
+opencv-python>=4.1.1
+Pillow>=7.1.2
+PyYAML>=5.3.1
+requests>=2.23.0
+scipy>=1.4.1
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.64.0
+ultralytics
+
+# Logging -------------------------------------
+tensorboard>=2.4.1
+# clearml
+# comet
+
+# Plotting ------------------------------------
+pandas>=1.1.4
+seaborn>=0.11.0
+
+# Export --------------------------------------
+# coremltools>=6.0 # CoreML export
+# onnx>=1.12.0 # ONNX export
+# onnx-simplifier>=0.4.1 # ONNX simplifier
+# nvidia-pyindex # TensorRT export
+# nvidia-tensorrt # TensorRT export
+# scikit-learn==0.19.2 # CoreML quantization
+# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
+# tensorflowjs>=3.9.0 # TF.js export
+# openvino-dev # OpenVINO export
+
+# Extras --------------------------------------
+ipython # interactive notebook
+psutil # system utilization
+thop>=0.1.1 # FLOPs computation
+# albumentations>=1.0.3
+# pycocotools>=2.0.6 # COCO mAP
+# roboflow
+
+# HUB -----------------------------------------
+GitPython>=3.1.24
+
+
+absl-py==1.4.0
+addict==2.4.0
+adjustText==0.8
+aiofiles==23.1.0
+aiohttp==3.8.4
+aiosignal==1.3.1
+alabaster==0.7.13
+albumentations==1.3.1
+alembic==1.13.0
+alibi-detect==0.11.4
+altair==5.0.1
+annotated-types==0.5.0
+antlr4-python3-runtime==4.9.3
+anyio==3.7.1
+anylabeling==0.3.3
+appdirs==1.4.4
+argon2-cffi==21.3.0
+argon2-cffi-bindings==21.2.0
+arrow==1.2.3
+astor==0.8.1
+astroid==2.15.6
+asttokens==2.2.1
+astunparse==1.6.3
+async-timeout==4.0.2
+attrs==23.1.0
+av==10.0.0
+Babel==2.12.1
+backcall==0.2.0
+bbox==0.9.4
+bbox-visualizer==0.1.0
+bce-python-sdk==0.8.87
+beautifulsoup4==4.12.2
+bleach==6.0.0
+blinker==1.6.2
+boto3==1.28.5
+botocore==1.31.5
+build==0.10.0
+cachetools==5.3.1
+catalogue==2.0.9
+certifi==2023.5.7
+cffi==1.15.1
+charset-normalizer==3.2.0
+click==8.1.5
+clip-ea==1.0
+cloudpickle==2.2.1
+clusteval==2.2.1
+clustimage==1.5.20
+cmake==3.26.4
+colorama==0.4.6
+coloredlogs==15.0.1
+colorgram.py==1.2.0
+colorlog==6.7.0
+colormap==1.0.4
+colourmap==1.1.16
+comm==0.1.3
+commonmark==0.9.1
+contourpy==1.1.0
+convcolors==2.2.0
+coverage==5.3.1
+cryptography==41.0.7
+cycler==0.11.0
+Cython==3.0.0
+darkdetect==0.8.0
+dask==2023.7.1
+datazets==0.1.9
+debugpy==1.6.7
+decorator==4.4.2
+decord==0.6.0
+DeepImageSearch==2.5
+deffcode==0.2.5
+defusedxml==0.7.1
+Deprecated==1.2.14
+dill==0.3.7
+distfit==1.7.3
+distributed==2023.7.1
+dm-tree==0.1.8
+docutils==0.17.1
+easydev==0.12.1
+echo1-coco-split==0.1.5
+efficientnet==1.0.0
+einops==0.3.2
+encord==0.1.103
+exceptiongroup==1.1.2
+executing==1.2.0
+extcolors==1.0.0
+faiss-cpu==1.7.4
+fastapi==0.100.0
+fastjsonschema==2.17.1
+ffmpegio-core==0.8.3
+ffmpy==0.3.1
+filelock==3.12.2
+fire==0.5.0
+Flask==2.3.2
+flask-babel==3.1.0
+flatbuffers==23.5.26
+flip-data==0.2.1
+fonttools==4.41.0
+fqdn==1.5.1
+frozenlist==1.4.0
+fsspec==2023.6.0
+ftfy==6.1.3
+funcy==1.18
+future==0.18.3
+gast==0.4.0
+gitdb==4.0.10
+GitPython==3.1.32
+google-auth==2.22.0
+google-auth-oauthlib==1.0.0
+google-pasta==0.2.0
+gradio==3.37.0
+gradio_client==0.2.10
+greenlet==3.0.2
+-e git+https://github.com/IDEA-Research/GroundingDINO@60d796825e1266e56f7e4e9e00e88de662b67bd3#egg=groundingdino
+grpcio==1.56.0
+h11==0.14.0
+h5py==3.9.0
+httpcore==0.17.3
+httptools==0.6.1
+httpx==0.24.1
+huggingface-hub==0.16.4
+humanfriendly==10.0
+hydra-core==1.3.2
+idna==3.4
+ijson==3.2.3
+image-classifiers==1.0.0
+image-quality==1.2.7
+ImageHash==4.3.1
+imageio==2.31.1
+imageio-ffmpeg==0.4.8
+imagesize==1.4.1
+imgaug==0.4.0
+imgviz==1.7.2
+importlib-metadata==6.8.0
+importlib-resources==6.1.0
+imutils==0.5.4
+inquirerpy==0.3.4
+ipykernel==6.24.0
+ipython==8.14.0
+ipython-genutils==0.2.0
+ipywidgets==8.0.7
+ismember==1.0.2
+isoduration==20.11.0
+isort==5.12.0
+itsdangerous==2.1.2
+jaraco.classes==3.3.0
+jax==0.4.14
+jedi==0.18.2
+Jinja2==3.1.2
+jmespath==1.0.1
+joblib==1.3.1
+json-tricks==3.16.1
+jsonpointer==2.4
+jsonschema==4.18.4
+jsonschema-specifications==2023.6.1
+jupyter==1.0.0
+jupyter-bbox-widget==0.5.0
+jupyter-console==6.6.3
+jupyter-events==0.6.3
+jupyter_client==8.3.0
+jupyter_core==5.3.1
+jupyter_server==2.7.0
+jupyter_server_terminals==0.4.4
+jupyterlab-pygments==0.2.2
+jupyterlab-widgets==3.0.8
+jupyterlab_executor==2023.1.1
+keras==2.15.0
+Keras-Applications==1.0.8
+keras-tqdm==2.0.1
+keyring==24.2.0
+kiwisolver==1.4.4
+labelme==5.2.1
+lazy-object-proxy==1.9.0
+lazy_loader==0.3
+libclang==16.0.6
+libsvm==3.23.0.4
+lightning-utilities==0.9.0
+linkify-it-py==2.0.2
+llvmlite==0.40.1
+locket==1.0.0
+loguru==0.6.0
+Mako==1.3.0
+Markdown==3.4.3
+markdown-it-py==2.2.0
+MarkupSafe==2.1.3
+matplotlib==3.7.2
+matplotlib-inline==0.1.6
+mccabe==0.7.0
+mdit-py-plugins==0.3.3
+mdurl==0.1.2
+mistune==3.0.1
+ml-dtypes==0.2.0
+more-itertools==9.1.0
+moviepy==1.0.3
+mpmath==1.3.0
+msgpack==1.0.5
+multidict==6.0.4
+multimethod==1.10
+multiprocess==0.70.15
+mypy-extensions==1.0.0
+natsort==8.4.0
+nbclassic==1.0.0
+nbclient==0.8.0
+nbconvert==7.6.0
+nbformat==5.9.1
+nest-asyncio==1.5.6
+networkx==3.1
+nodeenv==1.8.0
+nodejs-bin==16.15.1a4
+notebook==6.5.4
+notebook_shim==0.2.3
+numba==0.57.1
+numpy==1.25.1
+oauthlib==3.2.2
+omegaconf==2.3.0
+onnx==1.13.1
+onnx-simplifier==0.4.33
+onnxruntime==1.14.1
+opencv-python==4.8.0.74
+orjson==3.9.2
+packaging==23.1
+pandas==2.0.3
+Pillow==10.0.0
+psutil==5.9.5
+pydantic==2.0.3
+openvino==2023.1.0
+openvino-telemetry==2023.2.1
+opt-einsum==3.3.0
+orjson==3.9.2
+overrides==7.3.1
+p-tqdm==1.4.0
+packaging==23.1
+paddle-bfloat==0.1.7
+paddlepaddle==2.5.1
+paddleseg==2.8.0
+pandas==2.0.3
+pandas-stubs==2.0.1.230501
+pandera==0.15.2
+pandocfilters==1.5.0
+parso==0.8.3
+partd==1.4.0
+pastel==0.2.1
+patchify==0.2.3
+pathos==0.3.1
+patsy==0.5.4
+pca==2.0.5
+pexpect==4.8.0
+pfzy==0.3.4
+pickleshare==0.7.5
+Pillow==10.0.0
+pip-tools==7.1.0
+piq==0.8.0
+pkginfo==1.9.6
+platformdirs==3.8.1
+plotly==5.15.0
+pluggy==1.2.0
+poethepoet==0.16.5
+pox==0.3.3
+ppft==1.7.6.7
+prettytable==3.8.0
+prisma==0.8.2
+proglog==0.1.10
+prometheus-client==0.17.1
+prompt-toolkit==3.0.39
+protobuf==3.20.3
+psutil==5.9.5
+ptyprocess==0.7.0
+pure-eval==0.2.2
+pyasn1==0.5.0
+pyasn1-modules==0.3.0
+pybboxes==0.1.6
+pyclay-annotation-utils @ https://github.com/cm107/annotation_utils/archive/development.zip#sha256=fbab99536104fe62a02d3113af8c8a62a10242aaf4dfe6cd818e34f2d199e0c9
+pyclay-common-utils @ https://github.com/cm107/common_utils/archive/master.zip#sha256=9b2c664c8aa339a81edc087586d7cafa87c3b9a5aa91a13922e356ece148db4a
+pyclay-logger @ https://github.com/cm107/logger/archive/master.zip#sha256=cddf54ebec6ecedd65ee832afc76f500dad8fea1bc44619318147d047568348b
+pyclay-streamer @ https://github.com/cm107/streamer/archive/master.zip#sha256=8f63126e1c965a649d80972b2e7b7932252be144b0985a117c3a85bd3c1a56ba
+pycocotools==2.0.6
+pycparser==2.21
+pycryptodome==3.18.0
+pydantic==2.0.3
+pydantic_core==2.3.0
+pyDeprecate==0.3.2
+pydub==0.25.1
+pyee==8.2.2
+Pygments==2.15.1
+PyJWT==2.8.0
+pylabel==0.1.53
+pylint==2.17.4
+pynndescent==0.5.11
+pyparsing==3.0.9
+pypickle==1.1.0
+pyppeteer==1.0.2
+pyproject_hooks==1.0.0
+PyQt5==5.15.9
+PyQt5-Qt5==5.15.2
+PyQt5-sip==12.12.1
+pyquaternion==0.9.9
+pyreadline3==3.4.1
+python-dateutil==2.8.2
+python-dotenv==0.21.1
+python-json-logger==2.0.7
+python-multipart==0.0.6
+python-splitter==0.0.3
+pytz==2023.3
+PyWavelets==1.4.1
+pywin32==306
+pywin32-ctypes==0.2.2
+pywinpty==2.0.11
+PyYAML==6.0.1
+pyzmq==25.1.0
+qimage2ndarray==1.10.0
+qtconsole==5.4.3
+QtPy==2.3.1
+qudida==0.0.4
+rapidfuzz==3.1.1
+rarfile==4.0
+readme-renderer==40.0
+referencing==0.29.3
+regex==2023.6.3
+requests==2.31.0
+requests-oauthlib==1.3.1
+requests-toolbelt==1.0.0
+rfc3339-validator==0.1.4
+rfc3986==2.0.0
+rfc3986-validator==0.1.1
+rich==12.6.0
+rpds-py==0.8.11
+rsa==4.9
+s3transfer==0.6.1
+safetensors==0.3.1
+sahi==0.11.15
+scatterd==1.3.7
+scikit-image==0.21.0
+scikit-learn==1.3.0
+scipy==1.11.1
+seaborn==0.12.2
+segment-anything @ git+https://github.com/facebookresearch/segment-anything.git@6fdee8f2727f4506cfbbe553e23b895e27956588
+segmentation-models==1.0.1
+semantic-version==2.10.0
+Send2Trash==1.8.2
+Shapely==1.8.5.post1
+six==1.16.0
+smmap==5.0.0
+sniffio==1.3.0
+snowballstemmer==2.2.0
+sortedcontainers==2.4.0
+soupsieve==2.4.1
+Sphinx==4.0.3
+sphinx-rtd-theme==1.2.2
+sphinxcontrib-applehelp==1.0.4
+sphinxcontrib-devhelp==1.0.2
+sphinxcontrib-htmlhelp==2.0.1
+sphinxcontrib-jquery==4.1
+sphinxcontrib-jsmath==1.0.1
+sphinxcontrib-qthelp==1.0.3
+sphinxcontrib-serializinghtml==1.1.5
+split-folders==0.5.1
+SQLAlchemy==1.4.41
+sqlalchemy2-stubs==0.0.2a37
+sqlmodel==0.0.8
+stack-data==0.6.2
+starlette==0.27.0
+static-ffmpeg==2.5
+statsmodels==0.14.1
+stringcase==1.2.0
+supervision==0.6.0
+sympy==1.12
+tblib==2.0.0
+tenacity==8.2.2
+tensorboard==2.15.1
+tensorboard-data-server==0.7.1
+tensorflow-estimator==2.15.0
+tensorflow-intel==2.15.0
+tensorflow-io-gcs-filesystem==0.31.0
+tensorflow-probability==0.19.0
+termcolor==2.4.0
+terminado==0.17.1
+terminaltables==3.1.10
+thop==0.1.1.post2209072238
+threadpoolctl==3.2.0
+tifffile==2023.7.10
+timm==0.9.2
+tinycss2==1.2.1
+tokenizers==0.13.3
+toml==0.10.2
+tomli==2.0.1
+tomlkit==0.11.8
+toolz==0.12.0
+torch==2.0.1+cu117
+torchaudio==2.0.2+cu117
+torchmetrics==1.2.0
+torchvision==0.15.2+cu117
+tornado==6.3.2
+tqdm==4.65.0
+traitlets==5.9.0
+transformers==4.30.2
+treelib==1.6.1
+twine==4.0.2
+typeguard==4.1.5
+typer==0.6.1
+types-cachetools==5.3.0.7
+types-pytz==2022.7.1.2
+typing-inspect==0.9.0
+typing_extensions==4.7.1
+tzdata==2023.3
+uc-micro-py==1.0.2
+ultralytics==8.0.136
+umap-learn==0.5.5
+uri-template==1.3.0
+urllib3==1.26.18
+uvicorn==0.23.1
+video2images==1.3
+visualdl==2.5.3
+watchdog==2.3.1
+watchfiles==0.21.0
+wcwidth==0.2.12
+webcolors==1.13
+webencodings==0.5.1
+websocket-client==1.6.1
+websockets==11.0.3
+Werkzeug==2.3.6
+widgetsnbextension==4.0.8
+win32-setctime==1.1.0
+wrapt==1.14.1
+xlrd==2.0.1
+yapf==0.40.1
+yarl==1.9.2
+zict==3.0.0
+zipp==3.16.2
video.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:919e4ff5f44e76e718cc55b7b2eaf7cd13a97d6824fe420af514fea98576b0d7
+size 5489045
yolov8n.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
+size 6534387