ONNX
Wanli committed on
Commit
f622406
·
0 Parent(s):

add text detection model from ppocrv3 (#180)

Browse files
Files changed (6) hide show
  1. CMakeLists.txt +29 -0
  2. LICENSE +203 -0
  3. README.md +60 -0
  4. demo.cpp +183 -0
  5. demo.py +154 -0
  6. ppocr_det.py +59 -0
CMakeLists.txt ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Build script for the PP-OCRv3 text detection C++ demo.
cmake_minimum_required(VERSION 3.24)
set(project_name "opencv_zoo_text_detection_ppocr")

project(${project_name})

set(OPENCV_VERSION "4.8.0")
set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation")
# Find OpenCV; you may need to set the OpenCV_DIR variable to the absolute
# path of the directory containing the OpenCVConfig.cmake file, via the
# command line or GUI.
find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH})

# If the package has been found, several variables will be set; the full
# list with descriptions is in the OpenCVConfig.cmake file.
# Print some of them for diagnostics.
message(STATUS "OpenCV library status:")
message(STATUS "  config: ${OpenCV_DIR}")
message(STATUS "  version: ${OpenCV_VERSION}")
message(STATUS "  libraries: ${OpenCV_LIBS}")
message(STATUS "  include path: ${OpenCV_INCLUDE_DIRS}")

# Declare the executable target. The single source is listed explicitly:
# file(GLOB) would not notice newly added files until the next re-configure
# and is unnecessary for one known file.
add_executable(${project_name} demo.cpp)

# Link the application with the OpenCV libraries.
target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS})
LICENSE ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PP-OCRv3 Text Detection
2
+
3
+ PP-OCRv3: More Attempts for the Improvement of Ultra Lightweight OCR System.
4
+
5
+ Note:
6
+
7
+ - The int8 quantization model may produce unstable results due to some loss of accuracy.
8
+ - Original Paddle Models source of English: [here](https://paddleocr.bj.bcebos.com/PP-OCRv3/english/en_PP-OCRv3_det_infer.tar).
9
+ - Original Paddle Models source of Chinese: [here](https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar).
10
+ - `IC15` in the filename means the model is trained on [IC15 dataset](https://rrc.cvc.uab.es/?ch=4&com=introduction), which can detect English text instances only.
11
+ - `TD500` in the filename means the model is trained on [TD500 dataset](http://www.iapr-tc11.org/mediawiki/index.php/MSRA_Text_Detection_500_Database_(MSRA-TD500)), which can detect both English & Chinese instances.
12
+ - Visit https://docs.opencv.org/master/d4/d43/tutorial_dnn_text_spotting.html for more information.
13
+
14
+ ## Demo
15
+
16
+ ### Python
17
+
18
+ Run the following command to try the demo:
19
+
20
+ ```shell
21
+ # detect on camera input
22
+ python demo.py
23
+ # detect on an image
24
+ python demo.py --input /path/to/image -v
25
+
26
+ # get help regarding various parameters
27
+ python demo.py --help
28
+ ```
29
+
30
+ ### C++
31
+
32
+ Install latest OpenCV and CMake >= 3.24.0 to get started with:
33
+
34
+ ```shell
35
+ # A typical and default installation path of OpenCV is /usr/local
36
+ cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .
37
+ cmake --build build
38
+ # detect on camera input
39
+ ./build/opencv_zoo_text_detection_ppocr -m=/path/to/model
40
+ # detect on an image
41
+ ./build/opencv_zoo_text_detection_ppocr -m=/path/to/model -i=/path/to/image -v
42
+ # get help messages
43
+ ./build/opencv_zoo_text_detection_ppocr -h
44
+ ```
45
+
46
+ ### Example outputs
47
+
48
+ ![mask](./example_outputs/mask.jpg)
49
+
50
+ ![gsoc](./example_outputs/gsoc.jpg)
51
+
52
+ ## License
53
+
54
+ All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
55
+
56
+ ## Reference
57
+
58
+ - https://arxiv.org/abs/2206.03001
59
+ - https://github.com/PaddlePaddle/PaddleOCR
60
+ - https://docs.opencv.org/master/d4/d43/tutorial_dnn_text_spotting.html
demo.cpp ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <iostream>
2
+
3
+ #include <opencv2/dnn.hpp>
4
+ #include <opencv2/imgproc.hpp>
5
+ #include <opencv2/highgui.hpp>
6
+
7
+ using namespace std;
8
+ using namespace cv;
9
+ using namespace dnn;
10
+
11
+ vector< pair<cv::dnn::Backend, cv::dnn::Target> > backendTargetPairs = {
12
+ std::make_pair<cv::dnn::Backend, cv::dnn::Target>(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU),
13
+ std::make_pair<cv::dnn::Backend, cv::dnn::Target>(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA),
14
+ std::make_pair<cv::dnn::Backend, cv::dnn::Target>(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16),
15
+ std::make_pair<cv::dnn::Backend, cv::dnn::Target>(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU),
16
+ std::make_pair<cv::dnn::Backend, cv::dnn::Target>(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU)};
17
+
18
+
19
+ std::string keys =
20
+ "{ help h | | Print help message. }"
21
+ "{ model m | text_detection_ch_ppocrv3_2023may.onnx | Usage: Set model type, defaults to text_detection_ch_ppocrv3_2023may.onnx }"
22
+ "{ input i | | Usage: Path to input image or video file. Skip this argument to capture frames from a camera.}"
23
+ "{ width | 736 | Usage: Resize input image to certain width, default = 736. It should be multiple by 32.}"
24
+ "{ height | 736 | Usage: Resize input image to certain height, default = 736. It should be multiple by 32.}"
25
+ "{ binary_threshold | 0.3 | Usage: Threshold of the binary map, default = 0.3.}"
26
+ "{ polygon_threshold | 0.5 | Usage: Threshold of polygons, default = 0.5.}"
27
+ "{ max_candidates | 200 | Usage: Set maximum number of polygon candidates, default = 200.}"
28
+ "{ unclip_ratio | 2.0 | Usage: The unclip ratio of the detected text region, which determines the output size, default = 2.0.}"
29
+ "{ save s | true | Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.}"
30
+ "{ viz v | true | Usage: Specify to open a new window to show results. Invalid in case of camera input.}"
31
+ "{ backend bt | 0 | Choose one of computation backends: "
32
+ "0: (default) OpenCV implementation + CPU, "
33
+ "1: CUDA + GPU (CUDA), "
34
+ "2: CUDA + GPU (CUDA FP16), "
35
+ "3: TIM-VX + NPU, "
36
+ "4: CANN + NPU}";
37
+
38
+
39
+ class PPOCRDet {
40
+ public:
41
+
42
+ PPOCRDet(string modPath, Size inSize = Size(736, 736), float binThresh = 0.3,
43
+ float polyThresh = 0.5, int maxCand = 200, double unRatio = 2.0,
44
+ dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) : modelPath(modPath), inputSize(inSize), binaryThreshold(binThresh),
45
+ polygonThreshold(polyThresh), maxCandidates(maxCand), unclipRatio(unRatio),
46
+ backendId(bId), targetId(tId)
47
+ {
48
+ this->model = TextDetectionModel_DB(readNet(modelPath));
49
+ this->model.setPreferableBackend(backendId);
50
+ this->model.setPreferableTarget(targetId);
51
+
52
+ this->model.setBinaryThreshold(binaryThreshold);
53
+ this->model.setPolygonThreshold(polygonThreshold);
54
+ this->model.setUnclipRatio(unclipRatio);
55
+ this->model.setMaxCandidates(maxCandidates);
56
+
57
+ this->model.setInputParams(1.0 / 255.0, inputSize, Scalar(122.67891434, 116.66876762, 104.00698793));
58
+ }
59
+ pair< vector<vector<Point>>, vector<float> > infer(Mat image) {
60
+ CV_Assert(image.rows == this->inputSize.height && "height of input image != net input size ");
61
+ CV_Assert(image.cols == this->inputSize.width && "width of input image != net input size ");
62
+ vector<vector<Point>> pt;
63
+ vector<float> confidence;
64
+ this->model.detect(image, pt, confidence);
65
+ return make_pair< vector<vector<Point>> &, vector< float > &>(pt, confidence);
66
+ }
67
+
68
+ private:
69
+ string modelPath;
70
+ TextDetectionModel_DB model;
71
+ Size inputSize;
72
+ float binaryThreshold;
73
+ float polygonThreshold;
74
+ int maxCandidates;
75
+ double unclipRatio;
76
+ dnn::Backend backendId;
77
+ dnn::Target targetId;
78
+
79
+ };
80
+
81
+ Mat visualize(Mat image, pair< vector<vector<Point>>, vector<float> >&results, double fps=-1, Scalar boxColor=Scalar(0, 255, 0), Scalar textColor=Scalar(0, 0, 255), bool isClosed=true, int thickness=2)
82
+ {
83
+ Mat output;
84
+ image.copyTo(output);
85
+ if (fps > 0)
86
+ putText(output, format("FPS: %.2f", fps), Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, textColor);
87
+ polylines(output, results.first, isClosed, boxColor, thickness);
88
+ return output;
89
+ }
90
+
91
+ int main(int argc, char** argv)
92
+ {
93
+ CommandLineParser parser(argc, argv, keys);
94
+
95
+ parser.about("Use this program to run Real-time Scene Text Detection with Differentiable Binarization in opencv Zoo using OpenCV.");
96
+ if (parser.has("help"))
97
+ {
98
+ parser.printMessage();
99
+ return 0;
100
+ }
101
+
102
+ int backendTargetid = parser.get<int>("backend");
103
+ String modelName = parser.get<String>("model");
104
+
105
+ if (modelName.empty())
106
+ {
107
+ CV_Error(Error::StsError, "Model file " + modelName + " not found");
108
+ }
109
+
110
+ Size inpSize(parser.get<int>("width"), parser.get<int>("height"));
111
+ float binThresh = parser.get<float>("binary_threshold");
112
+ float polyThresh = parser.get<float>("polygon_threshold");
113
+ int maxCand = parser.get<int>("max_candidates");
114
+ double unRatio = parser.get<float>("unclip_ratio");
115
+ bool save = parser.get<bool>("save");
116
+ bool viz = parser.get<float>("viz");
117
+
118
+ PPOCRDet model(modelName, inpSize, binThresh, polyThresh, maxCand, unRatio, backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
119
+
120
+ //! [Open a video file or an image file or a camera stream]
121
+ VideoCapture cap;
122
+ if (parser.has("input"))
123
+ cap.open(parser.get<String>("input"));
124
+ else
125
+ cap.open(0);
126
+ if (!cap.isOpened())
127
+ CV_Error(Error::StsError, "Cannot opend video or file");
128
+ Mat originalImage;
129
+ static const std::string kWinName = modelName;
130
+ while (waitKey(1) < 0)
131
+ {
132
+ cap >> originalImage;
133
+ if (originalImage.empty())
134
+ {
135
+ if (parser.has("input"))
136
+ {
137
+ cout << "Frame is empty" << endl;
138
+ break;
139
+ }
140
+ else
141
+ continue;
142
+ }
143
+ int originalW = originalImage.cols;
144
+ int originalH = originalImage.rows;
145
+ double scaleHeight = originalH / double(inpSize.height);
146
+ double scaleWidth = originalW / double(inpSize.width);
147
+ Mat image;
148
+ resize(originalImage, image, inpSize);
149
+
150
+ // inference
151
+ TickMeter tm;
152
+ tm.start();
153
+ pair< vector<vector<Point>>, vector<float> > results = model.infer(image);
154
+ tm.stop();
155
+ auto x = results.first;
156
+ // Scale the results bounding box
157
+ for (auto &pts : results.first)
158
+ {
159
+ for (int i = 0; i < 4; i++)
160
+ {
161
+ pts[i].x = int(pts[i].x * scaleWidth);
162
+ pts[i].y = int(pts[i].y * scaleHeight);
163
+ }
164
+ }
165
+ originalImage = visualize(originalImage, results, tm.getFPS());
166
+ tm.reset();
167
+ if (parser.has("input"))
168
+ {
169
+ if (save)
170
+ {
171
+ cout << "Result image saved to result.jpg\n";
172
+ imwrite("result.jpg", originalImage);
173
+ }
174
+ if (viz)
175
+ imshow(kWinName, originalImage);
176
+ }
177
+ else
178
+ imshow(kWinName, originalImage);
179
+ }
180
+ return 0;
181
+ }
182
+
183
+
demo.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# This file is part of OpenCV Zoo project.
# It is subject to the license terms in the LICENSE file found in the same directory.
#
# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
# Third party copyrights are property of their respective owners.

import argparse
import re

import numpy as np
import cv2 as cv

from ppocr_det import PPOCRDet


def _version_tuple(version_string):
    """Leading integer components of a version string: '4.8.0-dev' -> (4, 8, 0)."""
    return tuple(int(x) for x in re.findall(r'\d+', version_string)[:3])

# Check OpenCV version.
# FIX: the original compared version strings lexicographically
# (cv.__version__ >= "4.8.0"), which wrongly rejects e.g. "4.10.0".
assert _version_tuple(cv.__version__) >= (4, 8, 0), \
       "Please install latest opencv-python to try this demo: python3 -m pip install --upgrade opencv-python"

# Valid combinations of backends and targets
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
]

parser = argparse.ArgumentParser(description='PP-OCR Text Detection (https://arxiv.org/abs/2206.03001).')
parser.add_argument('--input', '-i', type=str,
                    help='Usage: Set path to the input image. Omit for using default camera.')
parser.add_argument('--model', '-m', type=str, default='./text_detection_en_ppocrv3_2023may.onnx',
                    help='Usage: Set model path, defaults to text_detection_en_ppocrv3_2023may.onnx.')
parser.add_argument('--backend_target', '-bt', type=int, default=0,
                    help='''Choose one of the backend-target pair to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                    '''.format(*[x for x in range(len(backend_target_pairs))]))
parser.add_argument('--width', type=int, default=736,
                    help='Usage: Resize input image to certain width, default = 736. It should be multiple by 32.')
parser.add_argument('--height', type=int, default=736,
                    help='Usage: Resize input image to certain height, default = 736. It should be multiple by 32.')
parser.add_argument('--binary_threshold', type=float, default=0.3,
                    help='Usage: Threshold of the binary map, default = 0.3.')
parser.add_argument('--polygon_threshold', type=float, default=0.5,
                    help='Usage: Threshold of polygons, default = 0.5.')
parser.add_argument('--max_candidates', type=int, default=200,
                    help='Usage: Set maximum number of polygon candidates, default = 200.')
# FIX: use the builtin float as the argparse converter instead of np.float64;
# the downstream setUnclipRatio call accepts a plain Python float.
parser.add_argument('--unclip_ratio', type=float, default=2.0,
                    help=' Usage: The unclip ratio of the detected text region, which determines the output size, default = 2.0.')
parser.add_argument('--save', '-s', action='store_true',
                    help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
parser.add_argument('--vis', '-v', action='store_true',
                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
args = parser.parse_args()
57
+
58
def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), isClosed=True, thickness=2, fps=None):
    """Return a copy of `image` with the detected boxes drawn on it.

    results: tuple of (quadrilaterals, scores) as produced by PPOCRDet.infer.
    fps: when not None, an FPS counter is drawn in the top-left corner.
    """
    canvas = image.copy()

    if fps is not None:
        cv.putText(canvas, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)

    boxes = np.array(results[0])
    canvas = cv.polylines(canvas, boxes, isClosed, box_color, thickness)

    return canvas
68
+
69
if __name__ == '__main__':
    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

    # Instantiate model
    model = PPOCRDet(modelPath=args.model,
                     inputSize=[args.width, args.height],
                     binaryThreshold=args.binary_threshold,
                     polygonThreshold=args.polygon_threshold,
                     maxCandidates=args.max_candidates,
                     unclipRatio=args.unclip_ratio,
                     backendId=backend_id,
                     targetId=target_id)

    # If input is an image
    if args.input is not None:
        original_image = cv.imread(args.input)
        original_w = original_image.shape[1]
        original_h = original_image.shape[0]
        # Factors to map boxes detected on the resized image back to the
        # original resolution.
        scaleHeight = original_h / args.height
        scaleWidth = original_w / args.width
        image = cv.resize(original_image, [args.width, args.height])

        # Inference
        results = model.infer(image)

        # Scale the results bounding box
        for i in range(len(results[0])):
            for j in range(4):
                box = results[0][i][j]
                results[0][i][j][0] = box[0] * scaleWidth
                results[0][i][j][1] = box[1] * scaleHeight

        # Print results
        print('{} texts detected.'.format(len(results[0])))
        for idx, (bbox, score) in enumerate(zip(results[0], results[1])):
            print('{}: {} {} {} {}, {:.2f}'.format(idx, bbox[0], bbox[1], bbox[2], bbox[3], score))

        # Draw results on the input image
        original_image = visualize(original_image, results)

        # Save results if save is true
        if args.save:
            # FIX: message typo "Resutls" -> "Results"
            print('Results saved to result.jpg\n')
            cv.imwrite('result.jpg', original_image)

        # Visualize results in a new window
        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, original_image)
            cv.waitKey(0)
    else:  # Omit input to call default camera
        deviceId = 0
        cap = cv.VideoCapture(deviceId)

        tm = cv.TickMeter()
        while cv.waitKey(1) < 0:
            hasFrame, original_image = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            original_w = original_image.shape[1]
            original_h = original_image.shape[0]
            scaleHeight = original_h / args.height
            scaleWidth = original_w / args.width
            frame = cv.resize(original_image, [args.width, args.height])
            # Inference
            tm.start()
            results = model.infer(frame)  # results is a tuple
            tm.stop()

            # Scale the results bounding box
            for i in range(len(results[0])):
                for j in range(4):
                    box = results[0][i][j]
                    results[0][i][j][0] = box[0] * scaleWidth
                    results[0][i][j][1] = box[1] * scaleHeight

            # Draw results on the input image
            original_image = visualize(original_image, results, fps=tm.getFPS())

            # Visualize results in a new Window
            cv.imshow('{} Demo'.format(model.name), original_image)

            tm.reset()
ppocr_det.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is part of OpenCV Zoo project.
2
+ # It is subject to the license terms in the LICENSE file found in the same directory.
3
+ #
4
+ # Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
5
+ # Third party copyrights are property of their respective owners.
6
+
7
+ import numpy as np
8
+ import cv2 as cv
9
+
10
+ class PPOCRDet:
11
+ def __init__(self, modelPath, inputSize=[736, 736], binaryThreshold=0.3, polygonThreshold=0.5, maxCandidates=200, unclipRatio=2.0, backendId=0, targetId=0):
12
+ self._modelPath = modelPath
13
+ self._model = cv.dnn_TextDetectionModel_DB(
14
+ cv.dnn.readNet(self._modelPath)
15
+ )
16
+
17
+ self._inputSize = tuple(inputSize) # (w, h)
18
+ self._inputHeight = inputSize[0]
19
+ self._inputWidth = inputSize[1]
20
+ self._binaryThreshold = binaryThreshold
21
+ self._polygonThreshold = polygonThreshold
22
+ self._maxCandidates = maxCandidates
23
+ self._unclipRatio = unclipRatio
24
+ self._backendId = backendId
25
+ self._targetId = targetId
26
+
27
+ self._model.setPreferableBackend(self._backendId)
28
+ self._model.setPreferableTarget(self._targetId)
29
+
30
+ self._model.setBinaryThreshold(self._binaryThreshold)
31
+ self._model.setPolygonThreshold(self._polygonThreshold)
32
+ self._model.setUnclipRatio(self._unclipRatio)
33
+ self._model.setMaxCandidates(self._maxCandidates)
34
+
35
+ self._model.setInputSize(self._inputSize)
36
+ self._model.setInputMean((123.675, 116.28, 103.53))
37
+ self._model.setInputScale(1.0/255.0/np.array([0.229, 0.224, 0.225]))
38
+
39
+ @property
40
+ def name(self):
41
+ return self.__class__.__name__
42
+
43
+ def setBackendAndTarget(self, backendId, targetId):
44
+ self._backendId = backendId
45
+ self._targetId = targetId
46
+ self._model.setPreferableBackend(self._backendId)
47
+ self._model.setPreferableTarget(self._targetId)
48
+
49
+ def setInputSize(self, input_size):
50
+ self._inputSize = tuple(input_size)
51
+ self._model.setInputSize(self._inputSize)
52
+ self._model.setInputMean((123.675, 116.28, 103.53))
53
+ self._model.setInputScale(1.0/255.0/np.array([0.229, 0.224, 0.225]))
54
+
55
+ def infer(self, image):
56
+ assert image.shape[0] == self._inputSize[1], '{} (height of input image) != {} (preset height)'.format(image.shape[0], self._inputSize[1])
57
+ assert image.shape[1] == self._inputSize[0], '{} (width of input image) != {} (preset width)'.format(image.shape[1], self._inputSize[0])
58
+
59
+ return self._model.detect(image)