DaniAffCH committed on
Commit
e8dca96
·
1 Parent(s): 30e431e

C++ Demo - Human Segmentation (#243)

Browse files

* add human segmentation c++ demo

* removed debug print and update README

* inverted colors for consistency

* adjusted blending weight for visualization

models/human_segmentation_pphumanseg/CMakeLists.txt ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
cmake_minimum_required(VERSION 3.24)
set(CMAKE_CXX_STANDARD 11)
set(project_name "opencv_zoo_human_segmentation")

project(${project_name})

set(OPENCV_VERSION "4.9.0")
set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation")

# Find OpenCV; you may need to set the OpenCV_DIR variable to the absolute
# path of the directory containing OpenCVConfig.cmake, via the command line or GUI.
find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH})

# Once the package has been found, several variables are set (see
# OpenCVConfig.cmake for the full list with descriptions); print a few of them.
message(STATUS "OpenCV library status:")
message(STATUS "  config: ${OpenCV_DIR}")
message(STATUS "  version: ${OpenCV_VERSION}")
message(STATUS "  libraries: ${OpenCV_LIBS}")
message(STATUS "  include path: ${OpenCV_INCLUDE_DIRS}")

# Declare the executable target. The source is listed explicitly instead of
# using file(GLOB ...): globs are evaluated at configure time, so new files
# would not be picked up, and CMake's documentation discourages them.
add_executable(${project_name} demo.cpp)

# Link the application with the OpenCV libraries
target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS})
31
+
models/human_segmentation_pphumanseg/README.md CHANGED
@@ -4,6 +4,8 @@ This model is ported from [PaddleHub](https://github.com/PaddlePaddle/PaddleHub)
4
 
5
  ## Demo
6
 
 
 
7
  Run the following command to try the demo:
8
 
9
  ```shell
@@ -16,6 +18,23 @@ python demo.py --input /path/to/image -v
16
  python demo.py --help
17
  ```
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  ### Example outputs
20
 
21
  ![webcam demo](./example_outputs/pphumanseg_demo.gif)
 
4
 
5
  ## Demo
6
 
7
+ ### Python
8
+
9
  Run the following command to try the demo:
10
 
11
  ```shell
 
18
  python demo.py --help
19
  ```
20
 
21
+ ### C++
22
+
23
+ Install the latest OpenCV and CMake >= 3.24.0 to get started with:
24
+
25
+ ```shell
26
+ # A typical and default installation path of OpenCV is /usr/local
27
+ cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .
28
+ cmake --build build
29
+
30
+ # detect on camera input
31
+ ./build/opencv_zoo_human_segmentation
32
+ # detect on an image
33
+ ./build/opencv_zoo_human_segmentation -i=/path/to/image
34
+ # get help messages
35
+ ./build/opencv_zoo_human_segmentation -h
36
+ ```
37
+
38
  ### Example outputs
39
 
40
  ![webcam demo](./example_outputs/pphumanseg_demo.gif)
models/human_segmentation_pphumanseg/demo.cpp ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#include "opencv2/opencv.hpp"

#include <map>
#include <vector>
#include <string>
#include <iostream>

using namespace std;
using namespace cv;
using namespace dnn;

// Supported (DNN backend, DNN target) combinations. The numeric
// "backend_target" command-line option is used as an index into this table.
std::vector<std::pair<int, int>> backend_target_pairs = {
    {DNN_BACKEND_OPENCV, DNN_TARGET_CPU},
    {DNN_BACKEND_CUDA, DNN_TARGET_CUDA},
    {DNN_BACKEND_CUDA, DNN_TARGET_CUDA_FP16},
    {DNN_BACKEND_TIMVX, DNN_TARGET_NPU},
    {DNN_BACKEND_CANN, DNN_TARGET_NPU}
};
19
+
20
+ class PPHS
21
+ {
22
+ private:
23
+ Net model;
24
+ string modelPath;
25
+
26
+ Scalar imageMean = Scalar(0.5,0.5,0.5);
27
+ Scalar imageStd = Scalar(0.5,0.5,0.5);
28
+ Size modelInputSize = Size(192, 192);
29
+ Size currentSize;
30
+
31
+ const String inputNames = "x";
32
+ const String outputNames = "save_infer_model/scale_0.tmp_1";
33
+
34
+ int backend_id;
35
+ int target_id;
36
+
37
+ public:
38
+ PPHS(const string& modelPath,
39
+ int backend_id = 0,
40
+ int target_id = 0)
41
+ : modelPath(modelPath), backend_id(backend_id), target_id(target_id)
42
+ {
43
+ this->model = readNet(modelPath);
44
+ this->model.setPreferableBackend(backend_id);
45
+ this->model.setPreferableTarget(target_id);
46
+ }
47
+
48
+ Mat preprocess(const Mat image)
49
+ {
50
+ this->currentSize = image.size();
51
+ Mat preprocessed = Mat::zeros(this->modelInputSize, image.type());
52
+ resize(image, preprocessed, this->modelInputSize);
53
+
54
+ // image normalization
55
+ preprocessed.convertTo(preprocessed, CV_32F, 1.0 / 255.0);
56
+ preprocessed -= imageMean;
57
+ preprocessed /= imageStd;
58
+
59
+ return blobFromImage(preprocessed);;
60
+ }
61
+
62
+ Mat infer(const Mat image)
63
+ {
64
+ Mat inputBlob = preprocess(image);
65
+
66
+ this->model.setInput(inputBlob, this->inputNames);
67
+ Mat outputBlob = this->model.forward(this->outputNames);
68
+
69
+ return postprocess(outputBlob);
70
+ }
71
+
72
+ Mat postprocess(Mat image)
73
+ {
74
+ reduceArgMax(image,image,1);
75
+ image = image.reshape(1,image.size[2]);
76
+ image.convertTo(image, CV_32F);
77
+ resize(image, image, this->currentSize, 0, 0, INTER_LINEAR);
78
+ image.convertTo(image, CV_8U);
79
+
80
+ return image;
81
+ }
82
+
83
+ };
84
+
85
+
86
// Builds the pseudo-color palette used for visualization, mirroring the
// Python demo's get_color_map_list (PaddleSeg scheme): the bits of each
// class id, read as base-8 digits, are spread over the high bits of the
// R, G and B channels.
//
// The returned vector is laid out channel-planar:
//   [R_0 .. R_{n-1}, G_0 .. G_{n-1}, B_0 .. B_{n-1}]  with n == num_classes.
std::vector<uint8_t> getColorMapList(int num_classes) {
    // One extra class is generated and class 0 (black) is dropped afterwards,
    // mirroring the Python demo's `color_map = color_map[3:]`.
    num_classes += 1;

    std::vector<uint8_t> cm(num_classes * 3, 0);

    for (int i = 0; i < num_classes; ++i) {
        int lab = i;
        int j = 0;

        while (lab) {
            cm[i] |= (((lab >> 0) & 1) << (7 - j));
            cm[i + num_classes] |= (((lab >> 1) & 1) << (7 - j));
            cm[i + 2 * num_classes] |= (((lab >> 2) & 1) << (7 - j));
            ++j;
            lab >>= 3;
        }
    }

    // Drop class 0's entry from EACH channel plane (back to front so earlier
    // indices stay valid). The previous code erased only the first three
    // bytes, which in this planar layout removed three *red* entries
    // (R0, R1, R2) and left the R/G/B rows shifted by 3/2/1 positions
    // respectively — inconsistent with the Python demo's palette.
    cm.erase(cm.begin() + 2 * num_classes);  // B of class 0
    cm.erase(cm.begin() + num_classes);      // G of class 0
    cm.erase(cm.begin());                    // R of class 0

    return cm;
}
111
+
112
+ Mat visualize(const Mat& image, const Mat& result, float fps = -1.f, float weight = 0.4)
113
+ {
114
+ const Scalar& text_color = Scalar(0, 255, 0);
115
+ Mat output_image = image.clone();
116
+
117
+ vector<uint8_t> color_map = getColorMapList(256);
118
+
119
+ Mat cmm(color_map);
120
+
121
+ cmm = cmm.reshape(1,{3,256});
122
+
123
+ if (fps >= 0)
124
+ {
125
+ putText(output_image, format("FPS: %.2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2);
126
+ }
127
+
128
+ Mat c1, c2, c3;
129
+
130
+ LUT(result, cmm.row(0), c1);
131
+ LUT(result, cmm.row(1), c2);
132
+ LUT(result, cmm.row(2), c3);
133
+
134
+ Mat pseudo_img;
135
+ merge(std::vector<Mat>{c1,c2,c3}, pseudo_img);
136
+
137
+ addWeighted(output_image, weight, pseudo_img, 1 - weight, 0, output_image);
138
+
139
+ return output_image;
140
+ };
141
+
142
// Option specification for cv::CommandLineParser:
// "{ name alias | default value | help text }" per option.
string keys =
    "{ help h | | Print help message. }"
    "{ model m | human_segmentation_pphumanseg_2023mar.onnx | Usage: Path to the model, defaults to human_segmentation_pphumanseg_2023mar.onnx }"
    "{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
    "{ backend_target t | 0 | Choose one of the backend-target pair to run this demo:\n"
    "0: (default) OpenCV implementation + CPU,\n"
    "1: CUDA + GPU (CUDA),\n"
    "2: CUDA + GPU (CUDA FP16),\n"
    "3: TIM-VX + NPU,\n"
    "4: CANN + NPU}"
    "{ save s | false | Specify to save results.}"
    "{ vis v | true | Specify to open a window for result visualization.}"
    ;
155
+
156
+
157
+ int main(int argc, char** argv)
158
+ {
159
+ CommandLineParser parser(argc, argv, keys);
160
+
161
+ parser.about("Human Segmentation");
162
+ if (parser.has("help"))
163
+ {
164
+ parser.printMessage();
165
+ return 0;
166
+ }
167
+
168
+ string modelPath = parser.get<string>("model");
169
+ string inputPath = parser.get<string>("input");
170
+ uint8_t backendTarget = parser.get<uint8_t>("backend_target");
171
+ bool saveFlag = parser.get<bool>("save");
172
+ bool visFlag = parser.get<bool>("vis");
173
+
174
+ if (modelPath.empty())
175
+ CV_Error(Error::StsError, "Model file " + modelPath + " not found");
176
+
177
+ PPHS humanSegmentationModel(modelPath, backend_target_pairs[backendTarget].first, backend_target_pairs[backendTarget].second);
178
+
179
+ VideoCapture cap;
180
+ if (!inputPath.empty())
181
+ cap.open(samples::findFile(inputPath));
182
+ else
183
+ cap.open(0);
184
+
185
+ if (!cap.isOpened())
186
+ CV_Error(Error::StsError, "Cannot opend video or file");
187
+
188
+ Mat frame;
189
+ Mat result;
190
+ static const std::string kWinName = "Human Segmentation Demo";
191
+ TickMeter tm;
192
+
193
+ while (waitKey(1) < 0)
194
+ {
195
+ cap >> frame;
196
+
197
+ if (frame.empty())
198
+ {
199
+ if(inputPath.empty())
200
+ cout << "Frame is empty" << endl;
201
+ break;
202
+ }
203
+
204
+ tm.start();
205
+ result = humanSegmentationModel.infer(frame);
206
+ tm.stop();
207
+
208
+ Mat res_frame = visualize(frame, result, tm.getFPS());
209
+
210
+ if(visFlag || inputPath.empty())
211
+ {
212
+ imshow(kWinName, res_frame);
213
+ if(!inputPath.empty())
214
+ waitKey(0);
215
+ }
216
+ if(saveFlag)
217
+ {
218
+ cout << "Results are saved to result.jpg" << endl;
219
+
220
+ imwrite("result.jpg", res_frame);
221
+ }
222
+ }
223
+
224
+ return 0;
225
+ }
226
+
models/human_segmentation_pphumanseg/demo.py CHANGED
@@ -83,8 +83,8 @@ def visualize(image, result, weight=0.6, fps=None):
83
  vis_result (np.ndarray): The visualized result.
84
  """
85
  color_map = get_color_map_list(256)
86
- color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
87
- color_map = np.array(color_map).astype(np.uint8)
88
  # Use OpenCV LUT for color mapping
89
  c1 = cv.LUT(result, color_map[:, 0])
90
  c2 = cv.LUT(result, color_map[:, 1])
@@ -158,3 +158,4 @@ if __name__ == '__main__':
158
  cv.imshow('PPHumanSeg Demo', frame)
159
 
160
  tm.reset()
 
 
83
  vis_result (np.ndarray): The visualized result.
84
  """
85
  color_map = get_color_map_list(256)
86
+ color_map = np.array(color_map).reshape(256, 3).astype(np.uint8)
87
+
88
  # Use OpenCV LUT for color mapping
89
  c1 = cv.LUT(result, color_map[:, 0])
90
  c2 = cv.LUT(result, color_map[:, 1])
 
158
  cv.imshow('PPHumanSeg Demo', frame)
159
 
160
  tm.reset()
161
+