Satyam Goyal
committed on
Commit · bd5223f
1 Parent(s): a9555be
Merge pull request #95 from Satgoy152:adding-doc
Improved help messages for demo programs (#95)
- Added Demo Documentation
- Updated help messages
- Changed exception link
README.md CHANGED
@@ -1,20 +1,25 @@
 # Palm detector from MediaPipe Handpose
 
 This model detects palm bounding boxes and palm landmarks, and is converted from Tensorflow-JS to ONNX using following tools:
+
 - tfjs to tf_saved_model: https://github.com/patlevin/tfjs-to-tf/
 - tf_saved_model to ONNX: https://github.com/onnx/tensorflow-onnx
 - simplified by [onnx-simplifier](https://github.com/daquexian/onnx-simplifier)
 
-Also note that the model is quantized in per-channel mode with [Intel
+Also note that the model is quantized in per-channel mode with [Intel's neural compressor](https://github.com/intel/neural-compressor), which gives better accuracy but may lose some speed.
 
 ## Demo
 
 Run the following commands to try the demo:
+
 ```bash
 # detect on camera input
 python demo.py
 # detect on an image
 python demo.py -i /path/to/image
+
+# get help regarding various parameters
+python demo.py --help
 ```
 
 ### Example outputs
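The README lists the conversion toolchain (tfjs-to-tf, tf2onnx, onnx-simplifier) but not the commands behind it. Below is a minimal sketch of the last step only, simplifying the exported graph with onnx-simplifier's Python API; the file names are placeholder assumptions, not taken from this repo, and the earlier tfjs-to-tf and tf2onnx steps are command-line tools documented in the linked repositories.

```python
# Hypothetical sketch of the onnx-simplifier step; file names are placeholders.
import onnx
from onnxsim import simplify  # pip install onnx-simplifier

model = onnx.load("palm_detection_from_tf2onnx.onnx")    # assumed output of tf2onnx
simplified_model, ok = simplify(model)                    # folds constants, removes dead nodes
assert ok, "simplified model did not pass the output check"
onnx.save(simplified_model, "palm_detection_mediapipe_2022may.onnx")
```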
demo.py CHANGED
@@ -23,17 +23,17 @@ try:
     help_msg_backends += "; {:d}: TIMVX"
     help_msg_targets += "; {:d}: NPU"
 except:
-    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://
+    print('This version of OpenCV does not support TIM-VX and NPU. Visit https://github.com/opencv/opencv/wiki/TIM-VX-Backend-For-Running-OpenCV-On-NPU for more information.')
 
 parser = argparse.ArgumentParser(description='Hand Detector from MediaPipe')
-parser.add_argument('--input', '-i', type=str, help='
-parser.add_argument('--model', '-m', type=str, default='./palm_detection_mediapipe_2022may.onnx', help='
+parser.add_argument('--input', '-i', type=str, help='Usage: Set path to the input image. Omit for using default camera.')
+parser.add_argument('--model', '-m', type=str, default='./palm_detection_mediapipe_2022may.onnx', help='Usage: Set model path, defaults to palm_detection_mediapipe_2022may.onnx.')
 parser.add_argument('--backend', '-b', type=int, default=backends[0], help=help_msg_backends.format(*backends))
 parser.add_argument('--target', '-t', type=int, default=targets[0], help=help_msg_targets.format(*targets))
-parser.add_argument('--score_threshold', type=float, default=0.99, help='Filter out faces of confidence < conf_threshold. An empirical score threshold for the quantized model is 0.49.')
-parser.add_argument('--nms_threshold', type=float, default=0.3, help='Suppress bounding boxes of iou >= nms_threshold.')
-parser.add_argument('--save', '-s', type=str, default=False, help='Set
-parser.add_argument('--vis', '-v', type=str2bool, default=True, help='
+parser.add_argument('--score_threshold', type=float, default=0.99, help='Usage: Set the minimum needed confidence for the model to identify a palm, defaults to 0.99. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold. An empirical score threshold for the quantized model is 0.49.')
+parser.add_argument('--nms_threshold', type=float, default=0.3, help='Usage: Suppress bounding boxes of iou >= nms_threshold. Default = 0.3.')
+parser.add_argument('--save', '-s', type=str, default=False, help='Usage: Set "True" to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input. Default will be set to "False".')
+parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Usage: Default will be set to "True" and will open a new window to show results. Set to "False" to stop visualizations from being shown. Invalid in case of camera input.')
 args = parser.parse_args()
 
 def visualize(image, results, print_results=False, fps=None):
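The new help text for --save and --vis tells users to pass the strings "True" or "False", and --vis is declared with a str2bool converter that is defined outside this hunk. A minimal sketch of such a helper, assuming it follows the common argparse pattern (the repo's actual definition may differ):

```python
# Hypothetical str2bool converter matching how --vis is declared above.
import argparse

def str2bool(value):
    # Map common command-line spellings of true/false to a bool.
    if isinstance(value, bool):
        return value
    if value.lower() in ("true", "t", "yes", "y", "1"):
        return True
    if value.lower() in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)

parser = argparse.ArgumentParser(description='Hand Detector from MediaPipe')
parser.add_argument('--vis', '-v', type=str2bool, default=True,
                    help='Set to "False" to stop visualizations from being shown.')

print(parser.parse_args(['--vis', 'False']).vis)  # prints: False
```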