Upload 23 files
- model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/README.md +43 -0
- model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/models/cutoff_yolov10x_w8a8.qnn231.ctx.bin +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/models/post_pro.onnx +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/python/bus.jpg +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/README.md +52 -0
- model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/models/cutoff_yolov10x_w8a16.qnn231.ctx.bin +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/models/post_pro.onnx +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/python/bus.jpg +3 -0
- model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/python/run_test.py +265 -0
- model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/README.md +52 -0
- model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/models/cutoff_yolov10x_fp16.qnn231.ctx.bin +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/models/post_pro.onnx +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/python/bus.jpg +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/python/run_test.py +265 -0
- model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/README.md +43 -0
- model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/models/cutoff_yolov10x_w8a8.qnn231.ctx.bin +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/models/post_pro.onnx +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/python/bus.jpg +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/README.md +52 -0
- model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/models/cutoff_yolov10x_w8a16.qnn231.ctx.bin +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/models/post_pro.onnx +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/python/bus.jpg +3 -0
- model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/python/run_test.py +265 -0
model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/README.md
ADDED
@@ -0,0 +1,43 @@
## Model Information

### Source model

- Input shape: [1,3,640,640]
- Number of parameters: 30.34M
- Model size: 112.73M
- Output shape: [1,300,6]

Source model repository: [YOLOV10-x](https://github.com/THU-MIG/yolov10)

### Converted model

- Precision: INT8
- Backend: QNN2.31
- Target Device: FV01 QCS6490

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/4ded1198-4d50-4258-9043-67f419a405c2)

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/)

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN version that matches the backend above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```
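The [1,300,6] output listed above holds the model's NMS-free detections. As a minimal decoding sketch — assuming each row is [x1, y1, x2, y2, score, class_id], the layout used by the postprocess step in the run_test.py demos further down this commit:

```python
import numpy as np

def decode_detections(output, conf_thres=0.25):
    # output: [1, 300, 6] array; each row assumed [x1, y1, x2, y2, score, class_id]
    dets = output.reshape(-1, 6)
    dets = dets[dets[:, 4] >= conf_thres]  # keep confident detections
    boxes = dets[:, :4]                    # pixel coords in the 640x640 letterboxed frame
    scores = dets[:, 4]
    labels = dets[:, 5].astype(int)
    return boxes, scores, labels
```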
model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/models/cutoff_yolov10x_w8a8.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0088d18dbd3b39516ca04fdabd4b082d756e5eefd81dc4416e8a321a8576db8c
size 39414832
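The entry above is a Git LFS pointer file, not the model binary itself (the same holds for the other .bin and .onnx files in this commit). To materialize the real files after cloning, assuming git-lfs is installed:

```bash
git lfs install   # one-time git-lfs setup
git lfs pull      # replace pointer files with the actual binaries
```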
model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/models/post_pro.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec661775a0a2bc3fac35bf15917f59d991b5b5131db57d855aa57a2543be6ca4
size 6852
model_farm_yolov10x_qcs6490_qnn2.31_int8_aidlite/python/bus.jpg
ADDED
(binary image file, tracked with Git LFS)
model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/README.md
ADDED
@@ -0,0 +1,52 @@
## Model Information

### Source model

- Input shape: [1,3,640,640]
- Number of parameters: 30.34M
- Model size: 112.73M
- Output shape: [1,300,6]

Source model repository: [YOLOV10-x](https://github.com/THU-MIG/yolov10)

### Converted model

- Precision: W8A16
- Backend: QNN2.31
- Target Device: FV01 QCS6490

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/95d04959-5545-4b82-ade8-f2df6036c803)

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/)

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN version that matches the backend above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

#### Python

> Note: The ONNX model is used only in the post-processing stage; users' own models can reuse this ONNX model as well.

```bash
cd model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite
python3 python/run_test.py --target_model ./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin --imgs ./python/bus.jpg --invoke_nums 10
```
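For reference, a condensed sketch of the AidLite call sequence that run_test.py (below) performs; error checks are trimmed, and the dummy input stands in for a real letterboxed image:

```python
import aidlite
import numpy as np

# Build config: local QNN execution on the NPU (DSP accelerator)
config = aidlite.Config.create_instance()
config.implement_type = aidlite.ImplementType.TYPE_LOCAL
config.framework_type = aidlite.FrameworkType.TYPE_QNN
config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
config.is_quantify_model = 1

model = aidlite.Model.create_instance("./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin")
interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(model, config)
interpreter.init()
interpreter.load_model()

blob = np.zeros((1, 640, 640, 3), dtype=np.float32)  # letterboxed RGB image / 255.0
interpreter.set_input_tensor(0, blob.data)
interpreter.invoke()
raw = interpreter.get_output_tensor(0)  # flat array; the demo reshapes it to (1, 8400, 84)
interpreter.destory()
```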
model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/models/cutoff_yolov10x_w8a16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d31c9da6472e7cca8bd119fc3cbdfcec8a7317601ea9cb50e1efd0da039f410
size 41729232
model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/models/post_pro.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec661775a0a2bc3fac35bf15917f59d991b5b5131db57d855aa57a2543be6ca4
size 6852
model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/python/bus.jpg
ADDED
(binary image file, tracked with Git LFS)
model_farm_yolov10x_qcs6490_qnn2.31_w8a16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,265 @@
import time
import numpy as np
import cv2
import os
import aidlite
import argparse
import onnxruntime


# COCO dataset class names (80 classes).
classes = [
    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
    "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
    "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
    "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
    "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
    "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]


def letterbox(
    im,
    new_shape,
    color=(114, 114, 114),
    auto=False,
    scaleFill=False,
    scaleup=True,
    stride=32,
):
    """
    Resize and pad image while meeting stride-multiple constraints.

    Returns:
        im (array): (height, width, 3)
        ratio (array): [w_ratio, h_ratio]
        (dw, dh) (array): [w_padding, h_padding]
    """
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):  # [h_rect, w_rect]
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # wh ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # w h
    dw, dh = (
        new_shape[1] - new_unpad[0],
        new_shape[0] - new_unpad[1],
    )  # wh padding

    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])  # [w h]
        ratio = (
            new_shape[1] / shape[1],
            new_shape[0] / shape[0],
        )  # [w_ratio, h_ratio]

    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(
        im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
    )
    return im, ratio, (dw, dh)


class Colors:

    def __init__(self):
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


def rescale_coords(boxes, image_shape, input_shape):
    image_height, image_width = image_shape
    input_height, input_width = input_shape
    scale = min(input_width / image_width, input_height / image_height)
    pad_w = (input_width - image_width * scale) / 2
    pad_h = (input_height - image_height * scale) / 2
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_w) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_h) / scale
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, image_width)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, image_height)
    return boxes.astype(int)


def preprocess(image, input_shape):
    # Resize
    input_img = letterbox(image, input_shape)[0]
    # BGR -> RGB; the channel transpose is left out, so the blob stays NHWC
    # input_img = input_img[..., ::-1].transpose(2, 0, 1)
    input_img = input_img[..., ::-1]
    # Expand
    input_img = input_img[np.newaxis, :, :, :].astype(np.float32)
    # Contiguous
    input_img = np.ascontiguousarray(input_img)
    # Norm
    blob = input_img / 255.0
    return blob


def postprocess(output_data, conf_thres, image_shape, input_shape):
    outs = output_data  # in test.py, output_data is already (8400, 84)
    outs = outs[outs[:, 4] >= conf_thres]
    boxes = outs[:, :4]
    scores = outs[:, -2]
    labels = outs[:, -1].astype(int)
    boxes = rescale_coords(boxes, image_shape, input_shape)
    return boxes, scores, labels


class qnn_yolov10:
    def __init__(self, model_path, sdk="qnn", backend="npu"):
        self.config = aidlite.Config.create_instance()
        if self.config is None:
            print("Create config failed !")
            return

        self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
        if sdk.lower() == "qnn":
            self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
        else:
            self.config.framework_type = aidlite.FrameworkType.TYPE_SNPE2

        if backend.lower() == "npu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
        elif backend.lower() == "gpu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_GPU
        else:
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_CPU
        self.config.is_quantify_model = 1

        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            print("Create model failed !")
            return
        self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(self.model, self.config)
        if self.interpreter is None:
            print("build_interpretper_from_model_and_config failed !")
            return
        result = self.interpreter.init()
        if result != 0:
            print("interpreter init failed !")
            return
        result = self.interpreter.load_model()
        if result != 0:
            print("interpreter load model failed !")
            return
        print("detect model load success!")

    def __del__(self):
        if getattr(self, "interpreter", None) is not None:
            self.interpreter.destory()

    def __call__(self, img_input, invoke_nums):
        result = self.interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        invoke_time = []
        for i in range(invoke_nums):
            t1 = time.time()
            result = self.interpreter.invoke()
            if result != 0:
                print("interpreter invoke() failed")
            cost_time = (time.time() - t1) * 1000
            invoke_time.append(cost_time)

        max_invoke_time = max(invoke_time)
        min_invoke_time = min(invoke_time)
        mean_invoke_time = sum(invoke_time) / invoke_nums
        var_invoketime = np.var(invoke_time)
        print("====================================")
        print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
        print("====================================")

        output1 = self.interpreter.get_output_tensor(0)
        return output1


class onnx_yolov10:
    def __init__(self, model_path):
        self.sess_options = onnxruntime.SessionOptions()
        self.sess_options.intra_op_num_threads = 1
        self.sess = onnxruntime.InferenceSession(model_path, sess_options=self.sess_options)
        self.outname = [i.name for i in self.sess.get_outputs()]
        self.inname = [i.name for i in self.sess.get_inputs()]

    def __call__(self, img_input):
        inp = {self.inname[0]: img_input}
        out_put = self.sess.run(self.outname, inp)[0]
        return out_put


def main(args):
    input_shape = (640, 640)
    conf_thres = 0.25
    img_path = args.imgs
    invoke_nums = args.invoke_nums
    qnn_path = args.target_model

    # QNN + ONNX inference
    qnn_model1 = qnn_yolov10(qnn_path)

    onnx_model_path = 'models/post_pro.onnx'
    onnx_model = onnx_yolov10(onnx_model_path)

    print("Begin to run qnn...")
    im0 = cv2.imread(img_path)
    image_shape = im0.shape[:2]
    img_qnn = preprocess(im0, input_shape)
    qnn_out_shape = (1, 8400, 84)
    out1 = qnn_model1(img_qnn, invoke_nums)
    out1 = out1.reshape(*qnn_out_shape)
    out2 = onnx_model(out1)[0]

    boxes, scores, labels = postprocess(out2, conf_thres, image_shape, input_shape)
    print(f"Detect {len(boxes)} targets")

    colors = Colors()
    for label, score, box in zip(labels, scores, boxes):
        label_text = f'{classes[label]}: {score:.2f}'
        color = colors(label, True)
        cv2.rectangle(im0, (box[0], box[1]), (box[2], box[3]), color, 2, lineType=cv2.LINE_AA)
        cv2.putText(im0, label_text, (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    output_image_path = "python/detected_results.jpg"
    cv2.imwrite(output_image_path, im0)
    print(f"Saved detected result to {output_image_path}")


def parser_args():
    parser = argparse.ArgumentParser(description="Run YOLOv10 model inference")
    parser.add_argument('--target_model', type=str, default='./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin.aidem', help="Target model path")
    parser.add_argument('--imgs', type=str, default='./python/bus.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parser_args()
    main(args)
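As a usage sketch, the wrappers above can be driven over several images without re-creating the interpreter; the image list here is illustrative:

```python
# Hypothetical batch loop reusing the classes defined in run_test.py
qnn_model = qnn_yolov10("./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin")
post_model = onnx_yolov10("models/post_pro.onnx")

for path in ["./python/bus.jpg"]:  # add your own image paths here
    im0 = cv2.imread(path)
    blob = preprocess(im0, (640, 640))
    raw = qnn_model(blob, invoke_nums=1).reshape(1, 8400, 84)
    dets = post_model(raw)[0]
    boxes, scores, labels = postprocess(dets, 0.25, im0.shape[:2], (640, 640))
    print(path, "->", len(boxes), "detections")
```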
model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/README.md
ADDED
@@ -0,0 +1,52 @@
## Model Information

### Source model

- Input shape: [1,3,640,640]
- Number of parameters: 30.34M
- Model size: 112.73M
- Output shape: [1,300,6]

Source model repository: [YOLOV10-x](https://github.com/THU-MIG/yolov10)

### Converted model

- Precision: FP16
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/47ed290a-394b-4b75-8d25-5fa2058c6c48)

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/)

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN version that matches the backend above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

#### Python

> Note: The ONNX model is used only in the post-processing stage; users' own models can reuse this ONNX model as well.

```bash
cd model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite
python3 python/run_test.py --target_model ./models/cutoff_yolov10x_fp16.qnn231.ctx.bin --imgs ./python/bus.jpg --invoke_nums 10
```
model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/models/cutoff_yolov10x_fp16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd581cd4285c4e8149173353b0bab829da48cf979dc6e2672010e9a030849c12
size 66673688
model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/models/post_pro.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec661775a0a2bc3fac35bf15917f59d991b5b5131db57d855aa57a2543be6ca4
size 6852
model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/python/bus.jpg
ADDED
(binary image file, tracked with Git LFS)
model_farm_yolov10x_qcs8550_qnn2.31_fp16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,265 @@
import time
import numpy as np
import cv2
import os
import aidlite
import argparse
import onnxruntime


# COCO dataset class names (80 classes).
classes = [
    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
    "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
    "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
    "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
    "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
    "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]


def letterbox(
    im,
    new_shape,
    color=(114, 114, 114),
    auto=False,
    scaleFill=False,
    scaleup=True,
    stride=32,
):
    """
    Resize and pad image while meeting stride-multiple constraints.

    Returns:
        im (array): (height, width, 3)
        ratio (array): [w_ratio, h_ratio]
        (dw, dh) (array): [w_padding, h_padding]
    """
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):  # [h_rect, w_rect]
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # wh ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # w h
    dw, dh = (
        new_shape[1] - new_unpad[0],
        new_shape[0] - new_unpad[1],
    )  # wh padding

    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])  # [w h]
        ratio = (
            new_shape[1] / shape[1],
            new_shape[0] / shape[0],
        )  # [w_ratio, h_ratio]

    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(
        im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
    )
    return im, ratio, (dw, dh)


class Colors:

    def __init__(self):
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


def rescale_coords(boxes, image_shape, input_shape):
    image_height, image_width = image_shape
    input_height, input_width = input_shape
    scale = min(input_width / image_width, input_height / image_height)
    pad_w = (input_width - image_width * scale) / 2
    pad_h = (input_height - image_height * scale) / 2
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_w) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_h) / scale
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, image_width)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, image_height)
    return boxes.astype(int)


def preprocess(image, input_shape):
    # Resize
    input_img = letterbox(image, input_shape)[0]
    # BGR -> RGB; the channel transpose is left out, so the blob stays NHWC
    # input_img = input_img[..., ::-1].transpose(2, 0, 1)
    input_img = input_img[..., ::-1]
    # Expand
    input_img = input_img[np.newaxis, :, :, :].astype(np.float32)
    # Contiguous
    input_img = np.ascontiguousarray(input_img)
    # Norm
    blob = input_img / 255.0
    return blob


def postprocess(output_data, conf_thres, image_shape, input_shape):
    outs = output_data  # in test.py, output_data is already (8400, 84)
    outs = outs[outs[:, 4] >= conf_thres]
    boxes = outs[:, :4]
    scores = outs[:, -2]
    labels = outs[:, -1].astype(int)
    boxes = rescale_coords(boxes, image_shape, input_shape)
    return boxes, scores, labels


class qnn_yolov10:
    def __init__(self, model_path, sdk="qnn", backend="npu"):
        self.config = aidlite.Config.create_instance()
        if self.config is None:
            print("Create config failed !")
            return

        self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
        if sdk.lower() == "qnn":
            self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
        else:
            self.config.framework_type = aidlite.FrameworkType.TYPE_SNPE2

        if backend.lower() == "npu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
        elif backend.lower() == "gpu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_GPU
        else:
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_CPU
        self.config.is_quantify_model = 1

        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            print("Create model failed !")
            return
        self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(self.model, self.config)
        if self.interpreter is None:
            print("build_interpretper_from_model_and_config failed !")
            return
        result = self.interpreter.init()
        if result != 0:
            print("interpreter init failed !")
            return
        result = self.interpreter.load_model()
        if result != 0:
            print("interpreter load model failed !")
            return
        print("detect model load success!")

    def __del__(self):
        if getattr(self, "interpreter", None) is not None:
            self.interpreter.destory()

    def __call__(self, img_input, invoke_nums):
        result = self.interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        invoke_time = []
        for i in range(invoke_nums):
            t1 = time.time()
            result = self.interpreter.invoke()
            if result != 0:
                print("interpreter invoke() failed")
            cost_time = (time.time() - t1) * 1000
            invoke_time.append(cost_time)

        max_invoke_time = max(invoke_time)
        min_invoke_time = min(invoke_time)
        mean_invoke_time = sum(invoke_time) / invoke_nums
        var_invoketime = np.var(invoke_time)
        print("====================================")
        print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
        print("====================================")

        output1 = self.interpreter.get_output_tensor(0)
        return output1


class onnx_yolov10:
    def __init__(self, model_path):
        self.sess_options = onnxruntime.SessionOptions()
        self.sess_options.intra_op_num_threads = 1
        self.sess = onnxruntime.InferenceSession(model_path, sess_options=self.sess_options)
        self.outname = [i.name for i in self.sess.get_outputs()]
        self.inname = [i.name for i in self.sess.get_inputs()]

    def __call__(self, img_input):
        inp = {self.inname[0]: img_input}
        out_put = self.sess.run(self.outname, inp)[0]
        return out_put


def main(args):
    input_shape = (640, 640)
    conf_thres = 0.25
    img_path = args.imgs
    invoke_nums = args.invoke_nums
    qnn_path = args.target_model

    # QNN + ONNX inference
    qnn_model1 = qnn_yolov10(qnn_path)

    onnx_model_path = 'models/post_pro.onnx'
    onnx_model = onnx_yolov10(onnx_model_path)

    print("Begin to run qnn...")
    im0 = cv2.imread(img_path)
    image_shape = im0.shape[:2]
    img_qnn = preprocess(im0, input_shape)
    qnn_out_shape = (1, 8400, 84)
    out1 = qnn_model1(img_qnn, invoke_nums)
    out1 = out1.reshape(*qnn_out_shape)
    out2 = onnx_model(out1)[0]

    boxes, scores, labels = postprocess(out2, conf_thres, image_shape, input_shape)
    print(f"Detect {len(boxes)} targets")

    colors = Colors()
    for label, score, box in zip(labels, scores, boxes):
        label_text = f'{classes[label]}: {score:.2f}'
        color = colors(label, True)
        cv2.rectangle(im0, (box[0], box[1]), (box[2], box[3]), color, 2, lineType=cv2.LINE_AA)
        cv2.putText(im0, label_text, (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    output_image_path = "python/detected_results.jpg"
    cv2.imwrite(output_image_path, im0)
    print(f"Saved detected result to {output_image_path}")


def parser_args():
    parser = argparse.ArgumentParser(description="Run YOLOv10 model inference")
    parser.add_argument('--target_model', type=str, default='./models/cutoff_yolov10x_fp16.qnn231.ctx.bin.aidem', help="Target model path")
    parser.add_argument('--imgs', type=str, default='./python/bus.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parser_args()
    main(args)
model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/README.md
ADDED
@@ -0,0 +1,43 @@
## Model Information

### Source model

- Input shape: [1,3,640,640]
- Number of parameters: 30.34M
- Model size: 112.73M
- Output shape: [1,300,6]

Source model repository: [YOLOV10-x](https://github.com/THU-MIG/yolov10)

### Converted model

- Precision: INT8
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/7f3bee49-98bb-4d48-8635-3ed4c9c2bfc5)

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/)

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN version that matches the backend above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```
model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/models/cutoff_yolov10x_w8a8.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26a37ce9faa743d3e5bcff20d5a77892bbb31a0bf7119d78d4ca772938d03208
size 32566320
model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/models/post_pro.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec661775a0a2bc3fac35bf15917f59d991b5b5131db57d855aa57a2543be6ca4
size 6852
model_farm_yolov10x_qcs8550_qnn2.31_int8_aidlite/python/bus.jpg
ADDED
(binary image file, tracked with Git LFS)
model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/README.md
ADDED
@@ -0,0 +1,52 @@
## Model Information

### Source model

- Input shape: [1,3,640,640]
- Number of parameters: 30.34M
- Model size: 112.73M
- Output shape: [1,300,6]

Source model repository: [YOLOV10-x](https://github.com/THU-MIG/yolov10)

### Converted model

- Precision: W8A16
- Backend: QNN2.31
- Target Device: SNM972 QCS8550

## Model Conversion Reference

Users can find the model conversion reference at [aimo.aidlux.com](https://aimo.aidlux.com/#/public/bde0409b-1fed-4ea8-8019-104ccfbb511a)

## Inference with AidLite SDK

### SDK installation

Model Farm uses the AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://v2.docs.aidlux.com/en/sdk-api/aidlite-sdk/)

- Install AidLite SDK

```bash
# Install the appropriate version of the AidLite SDK
sudo aid-pkg update
sudo aid-pkg install aidlite-sdk
# Install the QNN version that matches the backend above, e.g. for QNN2.23: sudo aid-pkg install aidlite-qnn223
sudo aid-pkg install aidlite-{QNN VERSION}
```

- Verify AidLite SDK

```bash
# Check the AidLite SDK C++ library version
python3 -c "import aidlite ; print(aidlite.get_library_version())"

# Check the AidLite SDK Python library version
python3 -c "import aidlite ; print(aidlite.get_py_library_version())"
```

### Run Demo

#### Python

> Note: The ONNX model is used only in the post-processing stage; users' own models can reuse this ONNX model as well.

```bash
cd model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite
python3 python/run_test.py --target_model ./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin --imgs ./python/bus.jpg --invoke_nums 10
```
model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/models/cutoff_yolov10x_w8a16.qnn231.ctx.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d734daf78267ddae9a2ce6d87ab317c6a637870cecb6e3e8a39459ea39d102a1
size 34180304
model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/models/post_pro.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec661775a0a2bc3fac35bf15917f59d991b5b5131db57d855aa57a2543be6ca4
size 6852
model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/python/bus.jpg
ADDED
(binary image file, tracked with Git LFS)
model_farm_yolov10x_qcs8550_qnn2.31_w8a16_aidlite/python/run_test.py
ADDED
@@ -0,0 +1,265 @@
import time
import numpy as np
import cv2
import os
import aidlite
import argparse
import onnxruntime


# COCO dataset class names (80 classes).
classes = [
    "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
    "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
    "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
    "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
    "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
    "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]


def letterbox(
    im,
    new_shape,
    color=(114, 114, 114),
    auto=False,
    scaleFill=False,
    scaleup=True,
    stride=32,
):
    """
    Resize and pad image while meeting stride-multiple constraints.

    Returns:
        im (array): (height, width, 3)
        ratio (array): [w_ratio, h_ratio]
        (dw, dh) (array): [w_padding, h_padding]
    """
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):  # [h_rect, w_rect]
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # wh ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # w h
    dw, dh = (
        new_shape[1] - new_unpad[0],
        new_shape[0] - new_unpad[1],
    )  # wh padding

    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])  # [w h]
        ratio = (
            new_shape[1] / shape[1],
            new_shape[0] / shape[0],
        )  # [w_ratio, h_ratio]

    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(
        im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
    )
    return im, ratio, (dw, dh)


class Colors:

    def __init__(self):
        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


def rescale_coords(boxes, image_shape, input_shape):
    image_height, image_width = image_shape
    input_height, input_width = input_shape
    scale = min(input_width / image_width, input_height / image_height)
    pad_w = (input_width - image_width * scale) / 2
    pad_h = (input_height - image_height * scale) / 2
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_w) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_h) / scale
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, image_width)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, image_height)
    return boxes.astype(int)


def preprocess(image, input_shape):
    # Resize
    input_img = letterbox(image, input_shape)[0]
    # BGR -> RGB; the channel transpose is left out, so the blob stays NHWC
    # input_img = input_img[..., ::-1].transpose(2, 0, 1)
    input_img = input_img[..., ::-1]
    # Expand
    input_img = input_img[np.newaxis, :, :, :].astype(np.float32)
    # Contiguous
    input_img = np.ascontiguousarray(input_img)
    # Norm
    blob = input_img / 255.0
    return blob


def postprocess(output_data, conf_thres, image_shape, input_shape):
    outs = output_data  # in test.py, output_data is already (8400, 84)
    outs = outs[outs[:, 4] >= conf_thres]
    boxes = outs[:, :4]
    scores = outs[:, -2]
    labels = outs[:, -1].astype(int)
    boxes = rescale_coords(boxes, image_shape, input_shape)
    return boxes, scores, labels


class qnn_yolov10:
    def __init__(self, model_path, sdk="qnn", backend="npu"):
        self.config = aidlite.Config.create_instance()
        if self.config is None:
            print("Create config failed !")
            return

        self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
        if sdk.lower() == "qnn":
            self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
        else:
            self.config.framework_type = aidlite.FrameworkType.TYPE_SNPE2

        if backend.lower() == "npu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
        elif backend.lower() == "gpu":
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_GPU
        else:
            self.config.accelerate_type = aidlite.AccelerateType.TYPE_CPU
        self.config.is_quantify_model = 1

        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            print("Create model failed !")
            return
        self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(self.model, self.config)
        if self.interpreter is None:
            print("build_interpretper_from_model_and_config failed !")
            return
        result = self.interpreter.init()
        if result != 0:
            print("interpreter init failed !")
            return
        result = self.interpreter.load_model()
        if result != 0:
            print("interpreter load model failed !")
            return
        print("detect model load success!")

    def __del__(self):
        if getattr(self, "interpreter", None) is not None:
            self.interpreter.destory()

    def __call__(self, img_input, invoke_nums):
        result = self.interpreter.set_input_tensor(0, img_input.data)
        if result != 0:
            print("interpreter set_input_tensor() failed")
        invoke_time = []
        for i in range(invoke_nums):
            t1 = time.time()
            result = self.interpreter.invoke()
            if result != 0:
                print("interpreter invoke() failed")
            cost_time = (time.time() - t1) * 1000
            invoke_time.append(cost_time)

        max_invoke_time = max(invoke_time)
        min_invoke_time = min(invoke_time)
        mean_invoke_time = sum(invoke_time) / invoke_nums
        var_invoketime = np.var(invoke_time)
        print("====================================")
        print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
        print("====================================")

        output1 = self.interpreter.get_output_tensor(0)
        return output1


class onnx_yolov10:
    def __init__(self, model_path):
        self.sess_options = onnxruntime.SessionOptions()
        self.sess_options.intra_op_num_threads = 1
        self.sess = onnxruntime.InferenceSession(model_path, sess_options=self.sess_options)
        self.outname = [i.name for i in self.sess.get_outputs()]
        self.inname = [i.name for i in self.sess.get_inputs()]

    def __call__(self, img_input):
        inp = {self.inname[0]: img_input}
        out_put = self.sess.run(self.outname, inp)[0]
        return out_put


def main(args):
    input_shape = (640, 640)
    conf_thres = 0.25
    img_path = args.imgs
    invoke_nums = args.invoke_nums
    qnn_path = args.target_model

    # QNN + ONNX inference
    qnn_model1 = qnn_yolov10(qnn_path)

    onnx_model_path = 'models/post_pro.onnx'
    onnx_model = onnx_yolov10(onnx_model_path)

    print("Begin to run qnn...")
    im0 = cv2.imread(img_path)
    image_shape = im0.shape[:2]
    img_qnn = preprocess(im0, input_shape)
    qnn_out_shape = (1, 8400, 84)
    out1 = qnn_model1(img_qnn, invoke_nums)
    out1 = out1.reshape(*qnn_out_shape)
    out2 = onnx_model(out1)[0]

    boxes, scores, labels = postprocess(out2, conf_thres, image_shape, input_shape)
    print(f"Detect {len(boxes)} targets")

    colors = Colors()
    for label, score, box in zip(labels, scores, boxes):
        label_text = f'{classes[label]}: {score:.2f}'
        color = colors(label, True)
        cv2.rectangle(im0, (box[0], box[1]), (box[2], box[3]), color, 2, lineType=cv2.LINE_AA)
        cv2.putText(im0, label_text, (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    output_image_path = "python/detected_results.jpg"
    cv2.imwrite(output_image_path, im0)
    print(f"Saved detected result to {output_image_path}")


def parser_args():
    parser = argparse.ArgumentParser(description="Run YOLOv10 model inference")
    parser.add_argument('--target_model', type=str, default='./models/cutoff_yolov10x_w8a16.qnn231.ctx.bin.aidem', help="Target model path")
    parser.add_argument('--imgs', type=str, default='./python/bus.jpg', help="Predict images path")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Inference nums")
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parser_args()
    main(args)