Ryan Lee committed
Commit 1528081 · 1 Parent(s): a8f2672

Fix spelling, spacing, and unused variables (#236)


* Fix spelling, spacing, and unused variables

* Fixed typo in 3 more models

* Added open back

* Remove setBackendAndTarget functions from C++ demos, which were unused; backend and target are already set when each model is constructed (see the sketch below).
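For context, a minimal sketch of the pattern the demos keep after this change: the backend and target are applied once, right where the net is created, so a separate setter adds nothing. The class and names below (DemoModel, modelPath, bId, tId) are illustrative, not taken from any single demo.

#include <opencv2/dnn.hpp>
#include <string>

using namespace cv;
using namespace cv::dnn;

// Illustrative sketch only: backend and target are fixed at construction,
// which is why the separate setBackendAndTarget() helpers could be dropped.
class DemoModel
{
public:
    DemoModel(const std::string& modelPath, Backend bId, Target tId)
    {
        net = readNet(modelPath);
        // The same two calls the removed helpers wrapped:
        net.setPreferableBackend(bId);
        net.setPreferableTarget(tId);
    }

private:
    Net net;
};

Usage would look like DemoModel model("model.onnx", DNN_BACKEND_OPENCV, DNN_TARGET_CPU); with "model.onnx" standing in for whichever model file a demo loads.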

models/face_detection_yunet/demo.cpp CHANGED
@@ -31,13 +31,6 @@ public:
         model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
     }
 
-    void setBackendAndTarget(int backend_id, int target_id)
-    {
-        backend_id_ = backend_id;
-        target_id_ = target_id;
-        model = cv::FaceDetectorYN::create(model_path_, "", input_size_, conf_threshold_, nms_threshold_, top_k_, backend_id_, target_id_);
-    }
-
     /* Overwrite the input size when creating the model. Size format: [Width, Height].
     */
     void setInputSize(const cv::Size& input_size)
models/image_classification_mobilenet/demo.cpp CHANGED
@@ -69,7 +69,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat frame, blob;
     static const std::string kWinName = model;
     int nbInference = 0;
models/object_detection_nanodet/demo.cpp CHANGED
@@ -46,7 +46,8 @@ public:
     {
         this->strides = { 8, 16, 32, 64 };
         this->net = readNet(modelPath);
-        setBackendAndTarget(bId, tId);
+        this->net.setPreferableBackend(bId);
+        this->net.setPreferableTarget(tId);
         this->project = Mat::zeros(1, this->regMax + 1, CV_32F);
         for (size_t i = 0; i <= this->regMax; ++i)
         {
@@ -57,12 +58,6 @@ public:
         this->generateAnchors();
     }
 
-    void setBackendAndTarget(Backend bId, Target tId)
-    {
-        this->net.setPreferableBackend(bId);
-        this->net.setPreferableTarget(tId);
-    }
-
     Mat preProcess(const Mat& inputImage)
     {
         Image2BlobParams paramNanodet;
models/object_detection_nanodet/demo.py CHANGED
@@ -148,7 +148,7 @@ if __name__=='__main__':
     img = vis(preds, image, letterbox_scale)
 
     if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
         cv.imwrite('result.jpg', img)
 
     if args.vis:
models/object_detection_yolox/demo.cpp CHANGED
@@ -61,14 +61,6 @@ public:
         this->generateAnchors();
     }
 
-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     Mat preprocess(Mat img)
     {
         Mat blob;
@@ -137,7 +129,7 @@ public:
             boxesXYXY[r].height = boxes_xyxy.at<float>(r, 3);
         }
 
-        vector< int > keep;
+        vector<int> keep;
         NMSBoxesBatched(boxesXYXY, maxScores, maxScoreIdx, this->confThreshold, this->nmsThreshold, keep);
         Mat candidates(int(keep.size()), 6, CV_32FC1);
         int row = 0;
@@ -282,7 +274,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat frame, inputBlob;
     double letterboxScale;
 
models/object_detection_yolox/demo.py CHANGED
@@ -120,7 +120,7 @@ if __name__=='__main__':
     img = vis(preds, image, letterbox_scale)
 
     if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
         cv.imwrite('result.jpg', img)
 
     if args.vis:
models/person_detection_mediapipe/demo.cpp CHANGED
@@ -43,14 +43,6 @@ public:
         this->anchors = getMediapipeAnchor();
     }
 
-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     pair<Mat, Size> preprocess(Mat img)
     {
         Mat blob;
@@ -237,10 +229,9 @@ int main(int argc, char** argv)
         backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
     //! [Open a video file or an image file or a camera stream]
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
 
     static const std::string kWinName = "MPPersonDet Demo";
-    int nbInference = 0;
     while (waitKey(1) < 0)
     {
         cap >> frame;
models/pose_estimation_mediapipe/demo.cpp CHANGED
@@ -45,14 +45,6 @@ public:
         this->anchors = getMediapipeAnchor();
     }
 
-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     pair<Mat, Size> preprocess(Mat img)
     {
         Mat blob;
@@ -124,7 +116,7 @@ public:
         {
             rotBoxes[i] = Rect2d(Point2d(boxes.at<float>(i, 0), boxes.at<float>(i, 1)), Point2d(boxes.at<float>(i, 2), boxes.at<float>(i, 3)));
         }
-        vector< int > keep;
+        vector<int> keep;
         NMSBoxes(rotBoxes, score, this->scoreThreshold, this->nmsThreshold, keep, 1.0f, this->topK);
         if (keep.size() == 0)
             return Mat();
@@ -179,14 +171,6 @@ public:
         this->personBoxEnlargeFactor = 1.25;
     }
 
-    void setBackendAndTarget(dnn::Backend bId, dnn::Target tId)
-    {
-        this->backendId = bId;
-        this->targetId = tId;
-        this->net.setPreferableBackend(this->backendId);
-        this->net.setPreferableTarget(this->targetId);
-    }
-
     tuple<Mat, Mat, float, Mat, Size> preprocess(Mat image, Mat person)
     {
         /***
@@ -567,7 +551,7 @@ int main(int argc, char** argv)
     MPPose poseEstimator(model, confThreshold, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
     //! [Open a video file or an image file or a camera stream]
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
 
     static const std::string kWinName = "MPPose Demo";
     while (waitKey(1) < 0)
models/text_detection_ppocr/demo.cpp CHANGED
@@ -124,7 +124,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat originalImage;
     static const std::string kWinName = modelName;
     while (waitKey(1) < 0)
models/text_recognition_crnn/demo.cpp CHANGED
@@ -224,7 +224,7 @@ int main(int argc, char** argv)
     else
         cap.open(0);
     if (!cap.isOpened())
-        CV_Error(Error::StsError, "Cannot opend video or file");
+        CV_Error(Error::StsError, "Cannot open video or file");
     Mat originalImage;
     static const std::string kWinName = modelPath;
     while (waitKey(1) < 0)
models/text_recognition_crnn/demo.py CHANGED
@@ -106,7 +106,7 @@ if __name__ == '__main__':
 
     # Save results if save is true
    if args.save:
-        print('Resutls saved to result.jpg\n')
+        print('Results saved to result.jpg\n')
        cv.imwrite('result.jpg', original_image)
 
    # Visualize results in a new window