Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- cc-multilingual-main/cc_net/third_party/sentencepiece/sentencepiece.pc.in +10 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/CMakeLists.txt +4 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/LICENSE +10 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/darts.h +1926 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/LICENSE +24 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/esa.hxx +125 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/sais.hxx +364 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/LICENSE +32 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arena.cc +415 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arenastring.cc +43 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/bytestream.cc +196 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/coded_stream.cc +780 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/common.cc +389 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/extension_set.cc +1916 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_util.cc +814 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena.h +703 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena_impl.h +321 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arenastring.h +403 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/extension_set.h +1462 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_enum_util.h +46 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven.h +200 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven_lite.h +873 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_util.h +391 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/has_bits.h +105 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/implicit_weak_message.h +135 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/inlined_string_field.h +271 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream.h +1400 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream_inl.h +90 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream.h +248 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream_impl_lite.h +383 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h +1219 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_entry_lite.h +671 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_field_lite.h +143 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_type_handler.h +739 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h +424 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/metadata_lite.h +224 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/repeated_field.h +2630 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/bytestream.h +348 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/casts.h +134 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/common.h +242 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/fastmem.h +153 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/hash.h +441 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/int128.h +383 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/io_win32.h +115 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/logging.h +237 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/macros.h +168 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/map_util.h +771 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/mutex.h +130 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h +155 -0
- cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h.org +130 -0
cc-multilingual-main/cc_net/third_party/sentencepiece/sentencepiece.pc.in
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
prefix=@prefix@
|
2 |
+
exec_prefix=@exec_prefix@
|
3 |
+
libdir=@libdir@
|
4 |
+
includedir=@includedir@
|
5 |
+
|
6 |
+
Name: @PROJECT_NAME@
|
7 |
+
Description: Unsupervised text tokenizer and detokenizer for Neural Network-based text generation.
|
8 |
+
Version: @PROJECT_VERSION@
|
9 |
+
Libs: -L${libdir} -lsentencepiece -lsentencepiece_train @libprotobuf_lite@ @pkgconfiglibs@
|
10 |
+
Cflags: -I${includedir} @pkgconfigcflags@
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/CMakeLists.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include_directories(absl/strings darts_clone esaxx protobuf-lite)
|
2 |
+
|
3 |
+
|
4 |
+
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/LICENSE
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2008-2011, Susumu Yata
|
2 |
+
All rights reserved.
|
3 |
+
|
4 |
+
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
5 |
+
|
6 |
+
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
7 |
+
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
8 |
+
- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
9 |
+
|
10 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/darts_clone/darts.h
ADDED
@@ -0,0 +1,1926 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#ifndef DARTS_H_
|
2 |
+
#define DARTS_H_
|
3 |
+
|
4 |
+
#include <cstdio>
|
5 |
+
#include <exception>
|
6 |
+
#include <new>
|
7 |
+
|
8 |
+
#define DARTS_VERSION "0.32"
|
9 |
+
|
10 |
+
// DARTS_THROW() throws a <Darts::Exception> whose message starts with the
|
11 |
+
// file name and the line number. For example, DARTS_THROW("error message") at
|
12 |
+
// line 123 of "darts.h" throws a <Darts::Exception> which has a pointer to
|
13 |
+
// "darts.h:123: exception: error message". The message is available by using
|
14 |
+
// what() as well as that of <std::exception>.
|
15 |
+
#define DARTS_INT_TO_STR(value) #value
|
16 |
+
#define DARTS_LINE_TO_STR(line) DARTS_INT_TO_STR(line)
|
17 |
+
#define DARTS_LINE_STR DARTS_LINE_TO_STR(__LINE__)
|
18 |
+
#define DARTS_THROW(msg) throw Darts::Details::Exception( \
|
19 |
+
__FILE__ ":" DARTS_LINE_STR ": exception: " msg)
|
20 |
+
|
21 |
+
namespace Darts {
|
22 |
+
|
23 |
+
// The following namespace hides the internal types and classes.
|
24 |
+
namespace Details {
|
25 |
+
|
26 |
+
// This header assumes that <int> and <unsigned int> are 32-bit integer types.
|
27 |
+
//
|
28 |
+
// Darts-clone keeps values associated with keys. The type of the values is
|
29 |
+
// <value_type>. Note that the values must be positive integers because the
|
30 |
+
// most significant bit (MSB) of each value is used to represent whether the
|
31 |
+
// corresponding unit is a leaf or not. Also, the keys are represented by
|
32 |
+
// sequences of <char_type>s. <uchar_type> is the unsigned type of <char_type>.
|
33 |
+
typedef char char_type;
|
34 |
+
typedef unsigned char uchar_type;
|
35 |
+
typedef int value_type;
|
36 |
+
|
37 |
+
// The main structure of Darts-clone is an array of <DoubleArrayUnit>s, and the
|
38 |
+
// unit type is actually a wrapper of <id_type>.
|
39 |
+
typedef unsigned int id_type;
|
40 |
+
|
41 |
+
// <progress_func_type> is the type of callback functions for reporting the
|
42 |
+
// progress of building a dictionary. See also build() of <DoubleArray>.
|
43 |
+
// The 1st argument receives the progress value and the 2nd argument receives
|
44 |
+
// the maximum progress value. A usage example is to show the progress
|
45 |
+
// percentage, 100.0 * (the 1st argument) / (the 2nd argument).
|
46 |
+
typedef int (*progress_func_type)(std::size_t, std::size_t);
|
47 |
+
|
48 |
+
// <DoubleArrayUnit> is the type of double-array units and it is a wrapper of
|
49 |
+
// <id_type> in practice.
|
50 |
+
class DoubleArrayUnit {
|
51 |
+
public:
|
52 |
+
DoubleArrayUnit() : unit_() {}
|
53 |
+
|
54 |
+
// has_leaf() returns whether a leaf unit is immediately derived from the
|
55 |
+
// unit (true) or not (false).
|
56 |
+
bool has_leaf() const {
|
57 |
+
return ((unit_ >> 8) & 1) == 1;
|
58 |
+
}
|
59 |
+
// value() returns the value stored in the unit, and thus value() is
|
60 |
+
// available when and only when the unit is a leaf unit.
|
61 |
+
value_type value() const {
|
62 |
+
return static_cast<value_type>(unit_ & ((1U << 31) - 1));
|
63 |
+
}
|
64 |
+
|
65 |
+
// label() returns the label associted with the unit. Note that a leaf unit
|
66 |
+
// always returns an invalid label. For this feature, leaf unit's label()
|
67 |
+
// returns an <id_type> that has the MSB of 1.
|
68 |
+
id_type label() const {
|
69 |
+
return unit_ & ((1U << 31) | 0xFF);
|
70 |
+
}
|
71 |
+
// offset() returns the offset from the unit to its derived units.
|
72 |
+
id_type offset() const {
|
73 |
+
return (unit_ >> 10) << ((unit_ & (1U << 9)) >> 6);
|
74 |
+
}
|
75 |
+
|
76 |
+
private:
|
77 |
+
id_type unit_;
|
78 |
+
|
79 |
+
// Copyable.
|
80 |
+
};
|
81 |
+
|
82 |
+
// Darts-clone throws an <Exception> for memory allocation failure, invalid
|
83 |
+
// arguments or a too large offset. The last case means that there are too many
|
84 |
+
// keys in the given set of keys. Note that the `msg' of <Exception> must be a
|
85 |
+
// constant or static string because an <Exception> keeps only a pointer to
|
86 |
+
// that string.
|
87 |
+
class Exception : public std::exception {
|
88 |
+
public:
|
89 |
+
explicit Exception(const char *msg = NULL) throw() : msg_(msg) {}
|
90 |
+
Exception(const Exception &rhs) throw() : msg_(rhs.msg_) {}
|
91 |
+
virtual ~Exception() throw() {}
|
92 |
+
|
93 |
+
// <Exception> overrides what() of <std::exception>.
|
94 |
+
virtual const char *what() const throw() {
|
95 |
+
return (msg_ != NULL) ? msg_ : "";
|
96 |
+
}
|
97 |
+
|
98 |
+
private:
|
99 |
+
const char *msg_;
|
100 |
+
|
101 |
+
// Disallows operator=.
|
102 |
+
Exception &operator=(const Exception &);
|
103 |
+
};
|
104 |
+
|
105 |
+
} // namespace Details
|
106 |
+
|
107 |
+
// <DoubleArrayImpl> is the interface of Darts-clone. Note that other
|
108 |
+
// classes should not be accessed from outside.
|
109 |
+
//
|
110 |
+
// <DoubleArrayImpl> has 4 template arguments but only the 3rd one is used as
|
111 |
+
// the type of values. Note that the given <T> is used only from outside, and
|
112 |
+
// the internal value type is not changed from <Darts::Details::value_type>.
|
113 |
+
// In build(), given values are casted from <T> to <Darts::Details::value_type>
|
114 |
+
// by using static_cast. On the other hand, values are casted from
|
115 |
+
// <Darts::Details::value_type> to <T> in searching dictionaries.
|
116 |
+
template <typename, typename, typename T, typename>
|
117 |
+
class DoubleArrayImpl {
|
118 |
+
public:
|
119 |
+
// Even if this <value_type> is changed, the internal value type is still
|
120 |
+
// <Darts::Details::value_type>. Other types, such as 64-bit integer types
|
121 |
+
// and floating-point number types, should not be used.
|
122 |
+
typedef T value_type;
|
123 |
+
// A key is reprenseted by a sequence of <key_type>s. For example,
|
124 |
+
// exactMatchSearch() takes a <const key_type *>.
|
125 |
+
typedef Details::char_type key_type;
|
126 |
+
// In searching dictionaries, the values associated with the matched keys are
|
127 |
+
// stored into or returned as <result_type>s.
|
128 |
+
typedef value_type result_type;
|
129 |
+
|
130 |
+
// <result_pair_type> enables applications to get the lengths of the matched
|
131 |
+
// keys in addition to the values.
|
132 |
+
struct result_pair_type {
|
133 |
+
value_type value;
|
134 |
+
std::size_t length;
|
135 |
+
};
|
136 |
+
|
137 |
+
// The constructor initializes member variables with 0 and NULLs.
|
138 |
+
DoubleArrayImpl() : size_(0), array_(NULL), buf_(NULL) {}
|
139 |
+
// The destructor frees memory allocated for units and then initializes
|
140 |
+
// member variables with 0 and NULLs.
|
141 |
+
virtual ~DoubleArrayImpl() {
|
142 |
+
clear();
|
143 |
+
}
|
144 |
+
|
145 |
+
// <DoubleArrayImpl> has 2 kinds of set_result()s. The 1st set_result() is to
|
146 |
+
// set a value to a <value_type>. The 2nd set_result() is to set a value and
|
147 |
+
// a length to a <result_pair_type>. By using set_result()s, search methods
|
148 |
+
// can return the 2 kinds of results in the same way.
|
149 |
+
// Why the set_result()s are non-static? It is for compatibility.
|
150 |
+
//
|
151 |
+
// The 1st set_result() takes a length as the 3rd argument but it is not
|
152 |
+
// used. If a compiler does a good job, codes for getting the length may be
|
153 |
+
// removed.
|
154 |
+
void set_result(value_type *result, value_type value, std::size_t) const {
|
155 |
+
*result = value;
|
156 |
+
}
|
157 |
+
// The 2nd set_result() uses both `value' and `length'.
|
158 |
+
void set_result(result_pair_type *result,
|
159 |
+
value_type value, std::size_t length) const {
|
160 |
+
result->value = value;
|
161 |
+
result->length = length;
|
162 |
+
}
|
163 |
+
|
164 |
+
// set_array() calls clear() in order to free memory allocated to the old
|
165 |
+
// array and then sets a new array. This function is useful to set a memory-
|
166 |
+
// mapped array. Note that the array set by set_array() is not freed in
|
167 |
+
// clear() and the destructor of <DoubleArrayImpl>.
|
168 |
+
// set_array() can also set the size of the new array but the size is not
|
169 |
+
// used in search methods. So it works well even if the 2nd argument is 0 or
|
170 |
+
// omitted. Remember that size() and total_size() returns 0 in such a case.
|
171 |
+
void set_array(const void *ptr, std::size_t size = 0) {
|
172 |
+
clear();
|
173 |
+
array_ = static_cast<const unit_type *>(ptr);
|
174 |
+
size_ = size;
|
175 |
+
}
|
176 |
+
// array() returns a pointer to the array of units.
|
177 |
+
const void *array() const {
|
178 |
+
return array_;
|
179 |
+
}
|
180 |
+
|
181 |
+
// clear() frees memory allocated to units and then initializes member
|
182 |
+
// variables with 0 and NULLs. Note that clear() does not free memory if the
|
183 |
+
// array of units was set by set_array(). In such a case, `array_' is not
|
184 |
+
// NULL and `buf_' is NULL.
|
185 |
+
void clear() {
|
186 |
+
size_ = 0;
|
187 |
+
array_ = NULL;
|
188 |
+
if (buf_ != NULL) {
|
189 |
+
delete[] buf_;
|
190 |
+
buf_ = NULL;
|
191 |
+
}
|
192 |
+
}
|
193 |
+
|
194 |
+
// unit_size() returns the size of each unit. The size must be 4 bytes.
|
195 |
+
std::size_t unit_size() const {
|
196 |
+
return sizeof(unit_type);
|
197 |
+
}
|
198 |
+
// size() returns the number of units. It can be 0 if set_array() is used.
|
199 |
+
std::size_t size() const {
|
200 |
+
return size_;
|
201 |
+
}
|
202 |
+
// total_size() returns the number of bytes allocated to the array of units.
|
203 |
+
// It can be 0 if set_array() is used.
|
204 |
+
std::size_t total_size() const {
|
205 |
+
return unit_size() * size();
|
206 |
+
}
|
207 |
+
// nonzero_size() exists for compatibility. It always returns the number of
|
208 |
+
// units because it takes long time to count the number of non-zero units.
|
209 |
+
std::size_t nonzero_size() const {
|
210 |
+
return size();
|
211 |
+
}
|
212 |
+
|
213 |
+
// build() constructs a dictionary from given key-value pairs. If `lengths'
|
214 |
+
// is NULL, `keys' is handled as an array of zero-terminated strings. If
|
215 |
+
// `values' is NULL, the index in `keys' is associated with each key, i.e.
|
216 |
+
// the ith key has (i - 1) as its value.
|
217 |
+
// Note that the key-value pairs must be arranged in key order and the values
|
218 |
+
// must not be negative. Also, if there are duplicate keys, only the first
|
219 |
+
// pair will be stored in the resultant dictionary.
|
220 |
+
// `progress_func' is a pointer to a callback function. If it is not NULL,
|
221 |
+
// it will be called in build() so that the caller can check the progress of
|
222 |
+
// dictionary construction. For details, please see the definition of
|
223 |
+
// <Darts::Details::progress_func_type>.
|
224 |
+
// The return value of build() is 0, and it indicates the success of the
|
225 |
+
// operation. Otherwise, build() throws a <Darts::Exception>, which is a
|
226 |
+
// derived class of <std::exception>.
|
227 |
+
// build() uses another construction algorithm if `values' is not NULL. In
|
228 |
+
// this case, Darts-clone uses a Directed Acyclic Word Graph (DAWG) instead
|
229 |
+
// of a trie because a DAWG is likely to be more compact than a trie.
|
230 |
+
int build(std::size_t num_keys, const key_type * const *keys,
|
231 |
+
const std::size_t *lengths = NULL, const value_type *values = NULL,
|
232 |
+
Details::progress_func_type progress_func = NULL);
|
233 |
+
|
234 |
+
// open() reads an array of units from the specified file. And if it goes
|
235 |
+
// well, the old array will be freed and replaced with the new array read
|
236 |
+
// from the file. `offset' specifies the number of bytes to be skipped before
|
237 |
+
// reading an array. `size' specifies the number of bytes to be read from the
|
238 |
+
// file. If the `size' is 0, the whole file will be read.
|
239 |
+
// open() returns 0 iff the operation succeeds. Otherwise, it returns a
|
240 |
+
// non-zero value or throws a <Darts::Exception>. The exception is thrown
|
241 |
+
// when and only when a memory allocation fails.
|
242 |
+
int open(const char *file_name, const char *mode = "rb",
|
243 |
+
std::size_t offset = 0, std::size_t size = 0);
|
244 |
+
// save() writes the array of units into the specified file. `offset'
|
245 |
+
// specifies the number of bytes to be skipped before writing the array.
|
246 |
+
// open() returns 0 iff the operation succeeds. Otherwise, it returns a
|
247 |
+
// non-zero value.
|
248 |
+
int save(const char *file_name, const char *mode = "wb",
|
249 |
+
std::size_t offset = 0) const;
|
250 |
+
|
251 |
+
// The 1st exactMatchSearch() tests whether the given key exists or not, and
|
252 |
+
// if it exists, its value and length are set to `result'. Otherwise, the
|
253 |
+
// value and the length of `result' are set to -1 and 0 respectively.
|
254 |
+
// Note that if `length' is 0, `key' is handled as a zero-terminated string.
|
255 |
+
// `node_pos' specifies the start position of matching. This argument enables
|
256 |
+
// the combination of exactMatchSearch() and traverse(). For example, if you
|
257 |
+
// want to test "xyzA", "xyzBC", and "xyzDE", you can use traverse() to get
|
258 |
+
// the node position corresponding to "xyz" and then you can use
|
259 |
+
// exactMatchSearch() to test "A", "BC", and "DE" from that position.
|
260 |
+
// Note that the length of `result' indicates the length from the `node_pos'.
|
261 |
+
// In the above example, the lengths are { 1, 2, 2 }, not { 4, 5, 5 }.
|
262 |
+
template <class U>
|
263 |
+
void exactMatchSearch(const key_type *key, U &result,
|
264 |
+
std::size_t length = 0, std::size_t node_pos = 0) const {
|
265 |
+
result = exactMatchSearch<U>(key, length, node_pos);
|
266 |
+
}
|
267 |
+
// The 2nd exactMatchSearch() returns a result instead of updating the 2nd
|
268 |
+
// argument. So, the following exactMatchSearch() has only 3 arguments.
|
269 |
+
template <class U>
|
270 |
+
inline U exactMatchSearch(const key_type *key, std::size_t length = 0,
|
271 |
+
std::size_t node_pos = 0) const;
|
272 |
+
|
273 |
+
// commonPrefixSearch() searches for keys which match a prefix of the given
|
274 |
+
// string. If `length' is 0, `key' is handled as a zero-terminated string.
|
275 |
+
// The values and the lengths of at most `max_num_results' matched keys are
|
276 |
+
// stored in `results'. commonPrefixSearch() returns the number of matched
|
277 |
+
// keys. Note that the return value can be larger than `max_num_results' if
|
278 |
+
// there are more than `max_num_results' matches. If you want to get all the
|
279 |
+
// results, allocate more spaces and call commonPrefixSearch() again.
|
280 |
+
// `node_pos' works as well as in exactMatchSearch().
|
281 |
+
template <class U>
|
282 |
+
inline std::size_t commonPrefixSearch(const key_type *key, U *results,
|
283 |
+
std::size_t max_num_results, std::size_t length = 0,
|
284 |
+
std::size_t node_pos = 0) const;
|
285 |
+
|
286 |
+
// In Darts-clone, a dictionary is a deterministic finite-state automaton
|
287 |
+
// (DFA) and traverse() tests transitions on the DFA. The initial state is
|
288 |
+
// `node_pos' and traverse() chooses transitions labeled key[key_pos],
|
289 |
+
// key[key_pos + 1], ... in order. If there is not a transition labeled
|
290 |
+
// key[key_pos + i], traverse() terminates the transitions at that state and
|
291 |
+
// returns -2. Otherwise, traverse() ends without a termination and returns
|
292 |
+
// -1 or a nonnegative value, -1 indicates that the final state was not an
|
293 |
+
// accept state. When a nonnegative value is returned, it is the value
|
294 |
+
// associated with the final accept state. That is, traverse() returns the
|
295 |
+
// value associated with the given key if it exists. Note that traverse()
|
296 |
+
// updates `node_pos' and `key_pos' after each transition.
|
297 |
+
inline value_type traverse(const key_type *key, std::size_t &node_pos,
|
298 |
+
std::size_t &key_pos, std::size_t length = 0) const;
|
299 |
+
|
300 |
+
private:
|
301 |
+
typedef Details::uchar_type uchar_type;
|
302 |
+
typedef Details::id_type id_type;
|
303 |
+
typedef Details::DoubleArrayUnit unit_type;
|
304 |
+
|
305 |
+
std::size_t size_;
|
306 |
+
const unit_type *array_;
|
307 |
+
unit_type *buf_;
|
308 |
+
|
309 |
+
// Disallows copy and assignment.
|
310 |
+
DoubleArrayImpl(const DoubleArrayImpl &);
|
311 |
+
DoubleArrayImpl &operator=(const DoubleArrayImpl &);
|
312 |
+
};
|
313 |
+
|
314 |
+
// <DoubleArray> is the typical instance of <DoubleArrayImpl>. It uses <int>
|
315 |
+
// as the type of values and it is suitable for most cases.
|
316 |
+
typedef DoubleArrayImpl<void, void, int, void> DoubleArray;
|
317 |
+
|
318 |
+
// The interface section ends here. For using Darts-clone, there is no need
|
319 |
+
// to read the remaining section, which gives the implementation of
|
320 |
+
// Darts-clone.
|
321 |
+
|
322 |
+
//
|
323 |
+
// Member functions of DoubleArrayImpl (except build()).
|
324 |
+
//
|
325 |
+
|
326 |
+
// Reads a double-array image from `file_name', starting at `offset' bytes.
// If `size' is 0, the size is computed from the file length minus `offset'.
// Validates the first 256 units as a sanity check before loading the rest.
// Returns 0 on success, -1 on any I/O or validation failure (the previous
// dictionary is kept untouched in that case).
template <typename A, typename B, typename T, typename C>
int DoubleArrayImpl<A, B, T, C>::open(const char *file_name,
    const char *mode, std::size_t offset, std::size_t size) {
#ifdef _MSC_VER
  // MSVC deprecates fopen(); use the secure variant there.
  std::FILE *file;
  if (::fopen_s(&file, file_name, mode) != 0) {
    return -1;
  }
#else
  std::FILE *file = std::fopen(file_name, mode);
  if (file == NULL) {
    return -1;
  }
#endif

  if (size == 0) {
    // Derive the array size from the file length.
    if (std::fseek(file, 0, SEEK_END) != 0) {
      std::fclose(file);
      return -1;
    }
    size = std::ftell(file) - offset;
  }

  // Convert bytes to unit count; a valid dictionary has at least 256 units
  // and a multiple-of-256 unit count.
  size /= unit_size();
  if (size < 256 || (size & 0xFF) != 0) {
    std::fclose(file);
    return -1;
  }

  if (std::fseek(file, offset, SEEK_SET) != 0) {
    std::fclose(file);
    return -1;
  }

  // Read the first block and verify it looks like a double-array root block.
  unit_type units[256];
  if (std::fread(units, unit_size(), 256, file) != 256) {
    std::fclose(file);
    return -1;
  }

  // The root unit must have a NUL label, no leaf, and a small nonzero offset.
  if (units[0].label() != '\0' || units[0].has_leaf() ||
      units[0].offset() == 0 || units[0].offset() >= 512) {
    std::fclose(file);
    return -1;
  }
  for (id_type i = 1; i < 256; ++i) {
    // Offsets of in-use units must stay inside the array.
    if (units[i].label() <= 0xFF && units[i].offset() >= size) {
      std::fclose(file);
      return -1;
    }
  }

  unit_type *buf;
  try {
    buf = new unit_type[size];
    for (id_type i = 0; i < 256; ++i) {
      buf[i] = units[i];
    }
  } catch (const std::bad_alloc &) {
    std::fclose(file);
    DARTS_THROW("failed to open double-array: std::bad_alloc");
  }

  if (size > 256) {
    // Read the remaining units directly into the freshly allocated buffer.
    if (std::fread(buf + 256, unit_size(), size - 256, file) != size - 256) {
      std::fclose(file);
      delete[] buf;
      return -1;
    }
  }
  std::fclose(file);

  // Release any previously held dictionary before installing the new one.
  clear();

  size_ = size;
  array_ = buf;
  buf_ = buf;  // buf_ marks ownership: clear() will delete it.
  return 0;
}
|
405 |
+
|
406 |
+
template <typename A, typename B, typename T, typename C>
|
407 |
+
int DoubleArrayImpl<A, B, T, C>::save(const char *file_name,
|
408 |
+
const char *mode, std::size_t) const {
|
409 |
+
if (size() == 0) {
|
410 |
+
return -1;
|
411 |
+
}
|
412 |
+
|
413 |
+
#ifdef _MSC_VER
|
414 |
+
std::FILE *file;
|
415 |
+
if (::fopen_s(&file, file_name, mode) != 0) {
|
416 |
+
return -1;
|
417 |
+
}
|
418 |
+
#else
|
419 |
+
std::FILE *file = std::fopen(file_name, mode);
|
420 |
+
if (file == NULL) {
|
421 |
+
return -1;
|
422 |
+
}
|
423 |
+
#endif
|
424 |
+
|
425 |
+
if (std::fwrite(array_, unit_size(), size(), file) != size()) {
|
426 |
+
std::fclose(file);
|
427 |
+
return -1;
|
428 |
+
}
|
429 |
+
std::fclose(file);
|
430 |
+
return 0;
|
431 |
+
}
|
432 |
+
|
433 |
+
// Looks up `key' exactly, starting from `node_pos'. If `length' is 0, the
// key is treated as NUL-terminated. On a hit, `result' carries the stored
// value and the matched length; on a miss, it carries value -1 and length 0.
// Transitions use the double-array rule: next = pos ^ offset ^ label.
template <typename A, typename B, typename T, typename C>
template <typename U>
inline U DoubleArrayImpl<A, B, T, C>::exactMatchSearch(const key_type *key,
    std::size_t length, std::size_t node_pos) const {
  U result;
  // Pre-set the miss result so every early return is a clean failure.
  set_result(&result, static_cast<value_type>(-1), 0);

  unit_type unit = array_[node_pos];
  if (length != 0) {
    // Explicit-length variant: consume exactly `length' characters.
    for (std::size_t i = 0; i < length; ++i) {
      node_pos ^= unit.offset() ^ static_cast<uchar_type>(key[i]);
      unit = array_[node_pos];
      // A label mismatch means the transition does not exist.
      if (unit.label() != static_cast<uchar_type>(key[i])) {
        return result;
      }
    }
  } else {
    // NUL-terminated variant: `length' doubles as the running key position.
    for ( ; key[length] != '\0'; ++length) {
      node_pos ^= unit.offset() ^ static_cast<uchar_type>(key[length]);
      unit = array_[node_pos];
      if (unit.label() != static_cast<uchar_type>(key[length])) {
        return result;
      }
    }
  }

  // The final state must be an accept state (it owns a leaf unit).
  if (!unit.has_leaf()) {
    return result;
  }
  // The leaf unit (label '\0') holds the associated value.
  unit = array_[node_pos ^ unit.offset()];
  set_result(&result, static_cast<value_type>(unit.value()), length);
  return result;
}
|
466 |
+
|
467 |
+
// Finds every key that is a prefix of `key', starting from `node_pos'.
// At most `max_num_results' results are written into `results', but the
// returned count includes all matches, so callers can detect truncation.
// If `length' is 0, `key' is treated as NUL-terminated.
template <typename A, typename B, typename T, typename C>
template <typename U>
inline std::size_t DoubleArrayImpl<A, B, T, C>::commonPrefixSearch(
    const key_type *key, U *results, std::size_t max_num_results,
    std::size_t length, std::size_t node_pos) const {
  std::size_t num_results = 0;

  // Fold the starting unit's offset in up front; inside the loop node_pos is
  // kept at (state ^ offset) so only the label XOR remains per step.
  unit_type unit = array_[node_pos];
  node_pos ^= unit.offset();
  if (length != 0) {
    for (std::size_t i = 0; i < length; ++i) {
      node_pos ^= static_cast<uchar_type>(key[i]);
      unit = array_[node_pos];
      if (unit.label() != static_cast<uchar_type>(key[i])) {
        return num_results;
      }

      node_pos ^= unit.offset();
      if (unit.has_leaf()) {
        // This state terminates a stored key of length i + 1; record it if
        // there is still room, but count it regardless.
        if (num_results < max_num_results) {
          set_result(&results[num_results], static_cast<value_type>(
              array_[node_pos].value()), i + 1);
        }
        ++num_results;
      }
    }
  } else {
    // NUL-terminated variant; `length' doubles as the running key position.
    for ( ; key[length] != '\0'; ++length) {
      node_pos ^= static_cast<uchar_type>(key[length]);
      unit = array_[node_pos];
      if (unit.label() != static_cast<uchar_type>(key[length])) {
        return num_results;
      }

      node_pos ^= unit.offset();
      if (unit.has_leaf()) {
        if (num_results < max_num_results) {
          set_result(&results[num_results], static_cast<value_type>(
              array_[node_pos].value()), length + 1);
        }
        ++num_results;
      }
    }
  }

  return num_results;
}
|
514 |
+
|
515 |
+
// Incrementally walks the DFA from `node_pos', consuming key[key_pos..).
// Both `node_pos' and `key_pos' are updated after each successful
// transition, so a caller can resume later with more input. Returns -2 if a
// transition is missing, -1 if the input ran out at a non-accept state, or
// the stored value if the final state accepts.
template <typename A, typename B, typename T, typename C>
inline typename DoubleArrayImpl<A, B, T, C>::value_type
DoubleArrayImpl<A, B, T, C>::traverse(const key_type *key,
    std::size_t &node_pos, std::size_t &key_pos, std::size_t length) const {
  id_type id = static_cast<id_type>(node_pos);
  unit_type unit = array_[id];

  if (length != 0) {
    for ( ; key_pos < length; ++key_pos) {
      id ^= unit.offset() ^ static_cast<uchar_type>(key[key_pos]);
      unit = array_[id];
      if (unit.label() != static_cast<uchar_type>(key[key_pos])) {
        // No transition for this character: caller's positions point at it.
        return static_cast<value_type>(-2);
      }
      // Commit the successful transition before reading the next character.
      node_pos = id;
    }
  } else {
    // NUL-terminated variant.
    for ( ; key[key_pos] != '\0'; ++key_pos) {
      id ^= unit.offset() ^ static_cast<uchar_type>(key[key_pos]);
      unit = array_[id];
      if (unit.label() != static_cast<uchar_type>(key[key_pos])) {
        return static_cast<value_type>(-2);
      }
      node_pos = id;
    }
  }

  if (!unit.has_leaf()) {
    // Input exhausted at an intermediate (non-accept) state.
    return static_cast<value_type>(-1);
  }
  // Accept state: the leaf unit carries the value.
  unit = array_[id ^ unit.offset()];
  return static_cast<value_type>(unit.value());
}
|
548 |
+
|
549 |
+
namespace Details {
|
550 |
+
|
551 |
+
//
|
552 |
+
// Memory management of array.
|
553 |
+
//
|
554 |
+
|
555 |
+
// Scoped owner of a raw `new[]' array: the destructor releases the storage
// (pre-C++11 substitute for std::unique_ptr<T[]>). Non-copyable.
template <typename T>
class AutoArray {
 public:
  explicit AutoArray(T *array = NULL) : ptr_(array) {}
  ~AutoArray() { clear(); }

  // Unchecked element access; valid only while non-empty.
  const T &operator[](std::size_t id) const { return ptr_[id]; }
  T &operator[](std::size_t id) { return ptr_[id]; }

  // True when no array is currently owned.
  bool empty() const { return ptr_ == NULL; }

  // Releases the owned storage, if any. (delete[] on NULL is a no-op.)
  void clear() {
    delete[] ptr_;
    ptr_ = NULL;
  }

  // Exchanges ownership with another AutoArray.
  void swap(AutoArray *array) {
    T *const held = ptr_;
    ptr_ = array->ptr_;
    array->ptr_ = held;
  }

  // Takes ownership of `array', disposing of the previous storage.
  void reset(T *array = NULL) {
    AutoArray(array).swap(this);
  }

 private:
  T *ptr_;  // owned storage, NULL when empty

  // Disallows copy and assignment.
  AutoArray(const AutoArray &);
  AutoArray &operator=(const AutoArray &);
};
|
596 |
+
|
597 |
+
//
|
598 |
+
// Memory management of resizable array.
|
599 |
+
//
|
600 |
+
|
601 |
+
// Growable array (a minimal std::vector substitute) backed by a raw char
// buffer. Elements are constructed with placement new and destroyed with
// explicit destructor calls, so size_ tracks live objects while capacity_
// tracks raw storage. Non-copyable.
template <typename T>
class AutoPool {
 public:
  AutoPool() : buf_(), size_(0), capacity_(0) {}
  ~AutoPool() { clear(); }

  // Unchecked access; reinterprets the raw buffer as an array of T.
  const T &operator[](std::size_t id) const {
    return *(reinterpret_cast<const T *>(&buf_[0]) + id);
  }
  T &operator[](std::size_t id) {
    return *(reinterpret_cast<T *>(&buf_[0]) + id);
  }

  bool empty() const {
    return size_ == 0;
  }
  std::size_t size() const {
    return size_;
  }

  // Destroys all elements and releases the buffer.
  void clear() {
    resize(0);  // run destructors for live elements first
    buf_.clear();
    size_ = 0;
    capacity_ = 0;
  }

  void push_back(const T &value) {
    append(value);
  }
  void pop_back() {
    // Explicitly destroy the last live element; storage is kept.
    (*this)[--size_].~T();
  }

  // Appends a default-constructed element.
  void append() {
    if (size_ == capacity_)
      resize_buf(size_ + 1);
    new(&(*this)[size_++]) T;  // placement-construct in raw storage
  }
  // Appends a copy of `value'.
  void append(const T &value) {
    if (size_ == capacity_)
      resize_buf(size_ + 1);
    new(&(*this)[size_++]) T(value);
  }

  // Grows or shrinks to `size' live elements (new ones default-constructed).
  void resize(std::size_t size) {
    while (size_ > size) {
      (*this)[--size_].~T();
    }
    if (size > capacity_) {
      resize_buf(size);
    }
    while (size_ < size) {
      new(&(*this)[size_++]) T;
    }
  }
  // Same as above but copy-constructs new elements from `value'.
  void resize(std::size_t size, const T &value) {
    while (size_ > size) {
      (*this)[--size_].~T();
    }
    if (size > capacity_) {
      resize_buf(size);
    }
    while (size_ < size) {
      new(&(*this)[size_++]) T(value);
    }
  }

  // Ensures capacity for at least `size' elements without changing size().
  void reserve(std::size_t size) {
    if (size > capacity_) {
      resize_buf(size);
    }
  }

 private:
  AutoArray<char> buf_;     // raw storage (bytes, not constructed Ts)
  std::size_t size_;        // number of live (constructed) elements
  std::size_t capacity_;    // number of elements the buffer can hold

  // Disallows copy and assignment.
  AutoPool(const AutoPool &);
  AutoPool &operator=(const AutoPool &);

  void resize_buf(std::size_t size);
};
|
686 |
+
|
687 |
+
// Reallocates the raw buffer to hold at least `size' elements, moving the
// live elements over by copy-construct + destroy. Capacity doubles (next
// power of two) for incremental growth, or jumps straight to `size' when
// the request is already >= twice the current capacity.
template <typename T>
void AutoPool<T>::resize_buf(std::size_t size) {
  std::size_t capacity;
  if (size >= capacity_ * 2) {
    // Large jump: no point doubling repeatedly, allocate exactly `size'.
    capacity = size;
  } else {
    // Incremental growth: round up to the next power of two.
    capacity = 1;
    while (capacity < size) {
      capacity <<= 1;
    }
  }

  AutoArray<char> buf;
  try {
    buf.reset(new char[sizeof(T) * capacity]);
  } catch (const std::bad_alloc &) {
    DARTS_THROW("failed to resize pool: std::bad_alloc");
  }

  if (size_ > 0) {
    // Relocate live elements: copy-construct into the new buffer, then
    // destroy the originals in place.
    T *src = reinterpret_cast<T *>(&buf_[0]);
    T *dest = reinterpret_cast<T *>(&buf[0]);
    for (std::size_t i = 0; i < size_; ++i) {
      new(&dest[i]) T(src[i]);
      src[i].~T();
    }
  }

  // Adopt the new buffer; the old one is freed when `buf' goes out of scope.
  buf_.swap(&buf);
  capacity_ = capacity;
}
|
718 |
+
|
719 |
+
//
|
720 |
+
// Memory management of stack.
|
721 |
+
//
|
722 |
+
|
723 |
+
template <typename T>
|
724 |
+
class AutoStack {
|
725 |
+
public:
|
726 |
+
AutoStack() : pool_() {}
|
727 |
+
~AutoStack() {
|
728 |
+
clear();
|
729 |
+
}
|
730 |
+
|
731 |
+
const T &top() const {
|
732 |
+
return pool_[size() - 1];
|
733 |
+
}
|
734 |
+
T &top() {
|
735 |
+
return pool_[size() - 1];
|
736 |
+
}
|
737 |
+
|
738 |
+
bool empty() const {
|
739 |
+
return pool_.empty();
|
740 |
+
}
|
741 |
+
std::size_t size() const {
|
742 |
+
return pool_.size();
|
743 |
+
}
|
744 |
+
|
745 |
+
void push(const T &value) {
|
746 |
+
pool_.push_back(value);
|
747 |
+
}
|
748 |
+
void pop() {
|
749 |
+
pool_.pop_back();
|
750 |
+
}
|
751 |
+
|
752 |
+
void clear() {
|
753 |
+
pool_.clear();
|
754 |
+
}
|
755 |
+
|
756 |
+
private:
|
757 |
+
AutoPool<T> pool_;
|
758 |
+
|
759 |
+
// Disallows copy and assignment.
|
760 |
+
AutoStack(const AutoStack &);
|
761 |
+
AutoStack &operator=(const AutoStack &);
|
762 |
+
};
|
763 |
+
|
764 |
+
//
|
765 |
+
// Succinct bit vector.
|
766 |
+
//
|
767 |
+
|
768 |
+
// Succinct bit vector with rank support. Bits are packed into id_type
// units; build() precomputes per-unit running 1-counts so rank() is O(1).
class BitVector {
 public:
  BitVector() : units_(), ranks_(), num_ones_(0), size_(0) {}
  ~BitVector() {
    clear();
  }

  // Returns the bit at position `id'.
  bool operator[](std::size_t id) const {
    return (units_[id / UNIT_SIZE] >> (id % UNIT_SIZE) & 1) == 1;
  }

  // Returns the number of 1 bits in positions [0, id]. Valid only after
  // build().
  id_type rank(std::size_t id) const {
    std::size_t unit_id = id / UNIT_SIZE;
    // ranks_[unit_id] counts 1s in earlier units; the mask keeps bits
    // 0 .. (id % UNIT_SIZE) of the current unit.
    return ranks_[unit_id] + pop_count(units_[unit_id]
        & (~0U >> (UNIT_SIZE - (id % UNIT_SIZE) - 1)));
  }

  // Sets or clears the bit at position `id'.
  void set(std::size_t id, bool bit) {
    if (bit) {
      units_[id / UNIT_SIZE] |= 1U << (id % UNIT_SIZE);
    } else {
      units_[id / UNIT_SIZE] &= ~(1U << (id % UNIT_SIZE));
    }
  }

  bool empty() const {
    return units_.empty();
  }
  std::size_t num_ones() const {
    return num_ones_;
  }
  std::size_t size() const {
    return size_;
  }

  // Appends a 0 bit, allocating a fresh unit at unit boundaries.
  void append() {
    if ((size_ % UNIT_SIZE) == 0) {
      units_.append(0);
    }
    ++size_;
  }
  // Builds the rank index; call after all bits are set.
  void build();

  void clear() {
    units_.clear();
    ranks_.clear();
  }

 private:
  enum { UNIT_SIZE = sizeof(id_type) * 8 };  // bits per storage unit

  AutoPool<id_type> units_;   // packed bits
  AutoArray<id_type> ranks_;  // per-unit prefix 1-counts (built by build())
  std::size_t num_ones_;
  std::size_t size_;          // number of bits appended

  // Disallows copy and assignment.
  BitVector(const BitVector &);
  BitVector &operator=(const BitVector &);

  // SWAR parallel popcount of a 32-bit unit.
  static id_type pop_count(id_type unit) {
    unit = ((unit & 0xAAAAAAAA) >> 1) + (unit & 0x55555555);
    unit = ((unit & 0xCCCCCCCC) >> 2) + (unit & 0x33333333);
    unit = ((unit >> 4) + unit) & 0x0F0F0F0F;
    unit += unit >> 8;
    unit += unit >> 16;
    return unit & 0xFF;
  }
};
|
837 |
+
|
838 |
+
// Builds the rank acceleration table: ranks_[i] holds the number of 1 bits
// in all units before unit i, and num_ones_ ends up as the total 1-count.
inline void BitVector::build() {
  try {
    ranks_.reset(new id_type[units_.size()]);
  } catch (const std::bad_alloc &) {
    DARTS_THROW("failed to build rank index: std::bad_alloc");
  }

  num_ones_ = 0;
  for (std::size_t i = 0; i < units_.size(); ++i) {
    ranks_[i] = num_ones_;  // prefix count before this unit
    num_ones_ += pop_count(units_[i]);
  }
}
|
851 |
+
|
852 |
+
//
|
853 |
+
// Keyset.
|
854 |
+
//
|
855 |
+
|
856 |
+
template <typename T>
|
857 |
+
class Keyset {
|
858 |
+
public:
|
859 |
+
Keyset(std::size_t num_keys, const char_type * const *keys,
|
860 |
+
const std::size_t *lengths, const T *values) :
|
861 |
+
num_keys_(num_keys), keys_(keys), lengths_(lengths), values_(values) {}
|
862 |
+
|
863 |
+
std::size_t num_keys() const {
|
864 |
+
return num_keys_;
|
865 |
+
}
|
866 |
+
const char_type *keys(std::size_t id) const {
|
867 |
+
return keys_[id];
|
868 |
+
}
|
869 |
+
uchar_type keys(std::size_t key_id, std::size_t char_id) const {
|
870 |
+
if (has_lengths() && char_id >= lengths_[key_id])
|
871 |
+
return '\0';
|
872 |
+
return keys_[key_id][char_id];
|
873 |
+
}
|
874 |
+
|
875 |
+
bool has_lengths() const {
|
876 |
+
return lengths_ != NULL;
|
877 |
+
}
|
878 |
+
std::size_t lengths(std::size_t id) const {
|
879 |
+
if (has_lengths()) {
|
880 |
+
return lengths_[id];
|
881 |
+
}
|
882 |
+
std::size_t length = 0;
|
883 |
+
while (keys_[id][length] != '\0') {
|
884 |
+
++length;
|
885 |
+
}
|
886 |
+
return length;
|
887 |
+
}
|
888 |
+
|
889 |
+
bool has_values() const {
|
890 |
+
return values_ != NULL;
|
891 |
+
}
|
892 |
+
const value_type values(std::size_t id) const {
|
893 |
+
if (has_values()) {
|
894 |
+
return static_cast<value_type>(values_[id]);
|
895 |
+
}
|
896 |
+
return static_cast<value_type>(id);
|
897 |
+
}
|
898 |
+
|
899 |
+
private:
|
900 |
+
std::size_t num_keys_;
|
901 |
+
const char_type * const * keys_;
|
902 |
+
const std::size_t *lengths_;
|
903 |
+
const T *values_;
|
904 |
+
|
905 |
+
// Disallows copy and assignment.
|
906 |
+
Keyset(const Keyset &);
|
907 |
+
Keyset &operator=(const Keyset &);
|
908 |
+
};
|
909 |
+
|
910 |
+
//
|
911 |
+
// Node of Directed Acyclic Word Graph (DAWG).
|
912 |
+
//
|
913 |
+
|
914 |
+
class DawgNode {
|
915 |
+
public:
|
916 |
+
DawgNode() : child_(0), sibling_(0), label_('\0'),
|
917 |
+
is_state_(false), has_sibling_(false) {}
|
918 |
+
|
919 |
+
void set_child(id_type child) {
|
920 |
+
child_ = child;
|
921 |
+
}
|
922 |
+
void set_sibling(id_type sibling) {
|
923 |
+
sibling_ = sibling;
|
924 |
+
}
|
925 |
+
void set_value(value_type value) {
|
926 |
+
child_ = value;
|
927 |
+
}
|
928 |
+
void set_label(uchar_type label) {
|
929 |
+
label_ = label;
|
930 |
+
}
|
931 |
+
void set_is_state(bool is_state) {
|
932 |
+
is_state_ = is_state;
|
933 |
+
}
|
934 |
+
void set_has_sibling(bool has_sibling) {
|
935 |
+
has_sibling_ = has_sibling;
|
936 |
+
}
|
937 |
+
|
938 |
+
id_type child() const {
|
939 |
+
return child_;
|
940 |
+
}
|
941 |
+
id_type sibling() const {
|
942 |
+
return sibling_;
|
943 |
+
}
|
944 |
+
value_type value() const {
|
945 |
+
return static_cast<value_type>(child_);
|
946 |
+
}
|
947 |
+
uchar_type label() const {
|
948 |
+
return label_;
|
949 |
+
}
|
950 |
+
bool is_state() const {
|
951 |
+
return is_state_;
|
952 |
+
}
|
953 |
+
bool has_sibling() const {
|
954 |
+
return has_sibling_;
|
955 |
+
}
|
956 |
+
|
957 |
+
id_type unit() const {
|
958 |
+
if (label_ == '\0') {
|
959 |
+
return (child_ << 1) | (has_sibling_ ? 1 : 0);
|
960 |
+
}
|
961 |
+
return (child_ << 2) | (is_state_ ? 2 : 0) | (has_sibling_ ? 1 : 0);
|
962 |
+
}
|
963 |
+
|
964 |
+
private:
|
965 |
+
id_type child_;
|
966 |
+
id_type sibling_;
|
967 |
+
uchar_type label_;
|
968 |
+
bool is_state_;
|
969 |
+
bool has_sibling_;
|
970 |
+
|
971 |
+
// Copyable.
|
972 |
+
};
|
973 |
+
|
974 |
+
//
|
975 |
+
// Fixed unit of Directed Acyclic Word Graph (DAWG).
|
976 |
+
//
|
977 |
+
|
978 |
+
class DawgUnit {
|
979 |
+
public:
|
980 |
+
explicit DawgUnit(id_type unit = 0) : unit_(unit) {}
|
981 |
+
DawgUnit(const DawgUnit &unit) : unit_(unit.unit_) {}
|
982 |
+
|
983 |
+
DawgUnit &operator=(id_type unit) {
|
984 |
+
unit_ = unit;
|
985 |
+
return *this;
|
986 |
+
}
|
987 |
+
|
988 |
+
id_type unit() const {
|
989 |
+
return unit_;
|
990 |
+
}
|
991 |
+
|
992 |
+
id_type child() const {
|
993 |
+
return unit_ >> 2;
|
994 |
+
}
|
995 |
+
bool has_sibling() const {
|
996 |
+
return (unit_ & 1) == 1;
|
997 |
+
}
|
998 |
+
value_type value() const {
|
999 |
+
return static_cast<value_type>(unit_ >> 1);
|
1000 |
+
}
|
1001 |
+
bool is_state() const {
|
1002 |
+
return (unit_ & 2) == 2;
|
1003 |
+
}
|
1004 |
+
|
1005 |
+
private:
|
1006 |
+
id_type unit_;
|
1007 |
+
|
1008 |
+
// Copyable.
|
1009 |
+
};
|
1010 |
+
|
1011 |
+
//
|
1012 |
+
// Directed Acyclic Word Graph (DAWG) builder.
|
1013 |
+
//
|
1014 |
+
|
1015 |
+
// Builds a Directed Acyclic Word Graph from keys inserted in sorted order.
// Equivalent suffix sub-trees are merged via a hash table keyed on the
// packed unit sequence; shared states are tracked in is_intersections_.
class DawgBuilder {
 public:
  DawgBuilder() : nodes_(), units_(), labels_(), is_intersections_(),
      table_(), node_stack_(), recycle_bin_(), num_states_(0) {}
  ~DawgBuilder() {
    clear();
  }

  // The root is always unit 0.
  id_type root() const {
    return 0;
  }

  id_type child(id_type id) const {
    return units_[id].child();
  }
  // Siblings are stored adjacently; the flag says whether id + 1 is one.
  id_type sibling(id_type id) const {
    return units_[id].has_sibling() ? (id + 1) : 0;
  }
  int value(id_type id) const {
    return units_[id].value();
  }

  // A leaf carries the '\0' label and holds a value instead of a child.
  bool is_leaf(id_type id) const {
    return label(id) == '\0';
  }
  uchar_type label(id_type id) const {
    return labels_[id];
  }

  // True when the unit is shared by more than one key (set during flush()).
  bool is_intersection(id_type id) const {
    return is_intersections_[id];
  }
  // Dense 0-based index of a shared unit, via the bit vector's rank.
  id_type intersection_id(id_type id) const {
    return is_intersections_.rank(id) - 1;
  }

  std::size_t num_intersections() const {
    return is_intersections_.num_ones();
  }

  std::size_t size() const {
    return units_.size();
  }

  void init();
  void finish();

  void insert(const char *key, std::size_t length, value_type value);

  void clear();

 private:
  enum { INITIAL_TABLE_SIZE = 1 << 10 };

  AutoPool<DawgNode> nodes_;        // mutable under-construction nodes
  AutoPool<DawgUnit> units_;        // finalized packed units
  AutoPool<uchar_type> labels_;     // label per finalized unit
  BitVector is_intersections_;      // marks units reached by >1 key
  AutoPool<id_type> table_;         // open-addressing hash table of units
  AutoStack<id_type> node_stack_;   // active path of the last inserted key
  AutoStack<id_type> recycle_bin_;  // freed node ids for reuse
  std::size_t num_states_;

  // Disallows copy and assignment.
  DawgBuilder(const DawgBuilder &);
  DawgBuilder &operator=(const DawgBuilder &);

  void flush(id_type id);

  void expand_table();

  id_type find_unit(id_type id, id_type *hash_id) const;
  id_type find_node(id_type node_id, id_type *hash_id) const;

  bool are_equal(id_type node_id, id_type unit_id) const;

  id_type hash_unit(id_type id) const;
  id_type hash_node(id_type id) const;

  id_type append_node();
  id_type append_unit();

  void free_node(id_type id) {
    recycle_bin_.push(id);
  }

  // 32-bit integer mixing function (Jenkins-style) used for unit hashing.
  static id_type hash(id_type key) {
    key = ~key + (key << 15);  // key = (key << 15) - key - 1;
    key = key ^ (key >> 12);
    key = key + (key << 2);
    key = key ^ (key >> 4);
    key = key * 2057;  // key = (key + (key << 3)) + (key << 11);
    key = key ^ (key >> 16);
    return key;
  }
};
|
1111 |
+
|
1112 |
+
// Prepares the builder: allocates the hash table and creates the root
// node/unit pair. Must be called before the first insert().
inline void DawgBuilder::init() {
  table_.resize(INITIAL_TABLE_SIZE, 0);

  append_node();
  append_unit();

  num_states_ = 1;

  // 0xFF is an out-of-band label so the root never matches a real key byte.
  nodes_[0].set_label(0xFF);
  node_stack_.push(0);
}
|
1123 |
+
|
1124 |
+
// Finalizes the DAWG after the last insert(): flushes everything down to
// the root, writes the root unit, drops construction-only structures, and
// builds the intersection rank index.
inline void DawgBuilder::finish() {
  flush(0);

  units_[0] = nodes_[0].unit();
  labels_[0] = nodes_[0].label();

  // Construction scaffolding is no longer needed; only units_/labels_ and
  // the intersection bit vector survive.
  nodes_.clear();
  table_.clear();
  node_stack_.clear();
  recycle_bin_.clear();

  is_intersections_.build();
}
|
1137 |
+
|
1138 |
+
// Inserts `key' (of `length' bytes) with `value'. Keys must arrive in
// strictly ascending byte order; violations, negative values, zero-length
// keys, and embedded NULs all raise via DARTS_THROW.
inline void DawgBuilder::insert(const char *key, std::size_t length,
    value_type value) {
  if (value < 0) {
    DARTS_THROW("failed to insert key: negative value");
  } else if (length == 0) {
    DARTS_THROW("failed to insert key: zero-length key");
  }

  id_type id = 0;
  std::size_t key_pos = 0;

  // Phase 1: follow the existing path shared with the previous key.
  // key_pos runs to `length' inclusive so the terminal '\0' is matched too.
  for ( ; key_pos <= length; ++key_pos) {
    id_type child_id = nodes_[id].child();
    if (child_id == 0) {
      break;  // no children yet: the rest of the key is all new
    }

    uchar_type key_label = static_cast<uchar_type>(key[key_pos]);
    if (key_pos < length && key_label == '\0') {
      DARTS_THROW("failed to insert key: invalid null character");
    }

    // Children are kept in descending label order with the newest first,
    // so the current child must carry the largest label seen so far.
    uchar_type unit_label = nodes_[child_id].label();
    if (key_label < unit_label) {
      DARTS_THROW("failed to insert key: wrong key order");
    } else if (key_label > unit_label) {
      // Diverging here: the previous key's subtree is complete, freeze it.
      nodes_[child_id].set_has_sibling(true);
      flush(child_id);
      break;
    }
    id = child_id;
  }

  if (key_pos > length) {
    return;  // duplicate key: entire key (incl. terminator) already present
  }

  // Phase 2: append fresh nodes for the unshared suffix.
  for ( ; key_pos <= length; ++key_pos) {
    uchar_type key_label = static_cast<uchar_type>(
        (key_pos < length) ? key[key_pos] : '\0');
    id_type child_id = append_node();

    if (nodes_[id].child() == 0) {
      // First child of this node makes it the head of a sibling list.
      nodes_[child_id].set_is_state(true);
    }
    nodes_[child_id].set_sibling(nodes_[id].child());
    nodes_[child_id].set_label(key_label);
    nodes_[id].set_child(child_id);
    node_stack_.push(child_id);

    id = child_id;
  }
  // The final node (label '\0') stores the key's value in its child slot.
  nodes_[id].set_value(value);
}
|
1192 |
+
|
1193 |
+
// Releases every internal structure and resets the builder to its
// pre-init() state.
inline void DawgBuilder::clear() {
  nodes_.clear();
  units_.clear();
  labels_.clear();
  is_intersections_.clear();
  table_.clear();
  node_stack_.clear();
  recycle_bin_.clear();
  num_states_ = 0;
}
|
1203 |
+
|
1204 |
+
// Freezes every pending node above `id' on the active-path stack: each
// sibling group is either merged with an identical existing unit run
// (marking it as an intersection) or appended as new units. Flushed nodes
// are recycled, and the parent's child pointer is redirected to the
// finalized unit id.
inline void DawgBuilder::flush(id_type id) {
  while (node_stack_.top() != id) {
    id_type node_id = node_stack_.top();
    node_stack_.pop();

    // Keep the hash table under 75% load.
    if (num_states_ >= table_.size() - (table_.size() >> 2)) {
      expand_table();
    }

    id_type num_siblings = 0;
    for (id_type i = node_id; i != 0; i = nodes_[i].sibling()) {
      ++num_siblings;
    }

    id_type hash_id;
    id_type match_id = find_node(node_id, &hash_id);
    if (match_id != 0) {
      // An identical sub-tree already exists: share it.
      is_intersections_.set(match_id, true);
    } else {
      // Append the sibling group as consecutive units. The sibling chain is
      // newest-first, so units are filled from the back (unit_id downward)
      // to end up in label order.
      id_type unit_id = 0;
      for (id_type i = 0; i < num_siblings; ++i) {
        unit_id = append_unit();
      }
      for (id_type i = node_id; i != 0; i = nodes_[i].sibling()) {
        units_[unit_id] = nodes_[i].unit();
        labels_[unit_id] = nodes_[i].label();
        --unit_id;
      }
      match_id = unit_id + 1;  // first unit of the group
      table_[hash_id] = match_id;
      ++num_states_;
    }

    // Recycle the now-frozen nodes.
    for (id_type i = node_id, next; i != 0; i = next) {
      next = nodes_[i].sibling();
      free_node(i);
    }

    // Point the parent (next entry on the stack) at the finalized units.
    nodes_[node_stack_.top()].set_child(match_id);
  }
  node_stack_.pop();
}
|
1246 |
+
|
1247 |
+
// Doubles the hash table and re-inserts every state-head unit. A unit
// heads a hashed run iff it is a leaf ('\0' label) or flagged is_state.
inline void DawgBuilder::expand_table() {
  std::size_t table_size = table_.size() << 1;
  table_.clear();
  table_.resize(table_size, 0);

  // Unit 0 is the root sentinel and is never hashed; start from 1.
  for (std::size_t i = 1; i < units_.size(); ++i) {
    id_type id = static_cast<id_type>(i);
    if (labels_[id] == '\0' || units_[id].is_state()) {
      id_type hash_id;
      find_unit(id, &hash_id);  // locate the empty slot for this unit
      table_[hash_id] = id;
    }
  }
}
|
1261 |
+
|
1262 |
+
// Linear-probes the table for an empty slot for unit `id', leaving the slot
// index in *hash_id. Used only during rehashing, where every stored unit is
// distinct, so the probe can skip occupied slots without comparing.
inline id_type DawgBuilder::find_unit(id_type id, id_type *hash_id) const {
  *hash_id = hash_unit(id) % table_.size();
  for ( ; ; *hash_id = (*hash_id + 1) % table_.size()) {
    id_type unit_id = table_[*hash_id];
    if (unit_id == 0) {
      break;  // found the empty slot for this unit
    }

    // There must not be the same unit.
  }
  return 0;
}
|
1274 |
+
|
1275 |
+
// Linear-probes the table for a finalized unit run equal to the sibling
// group headed by `node_id'. Returns the matching unit id (sharing target)
// or 0 if none; *hash_id ends at the slot where a new entry would go.
inline id_type DawgBuilder::find_node(id_type node_id,
    id_type *hash_id) const {
  *hash_id = hash_node(node_id) % table_.size();
  for ( ; ; *hash_id = (*hash_id + 1) % table_.size()) {
    id_type unit_id = table_[*hash_id];
    if (unit_id == 0) {
      break;  // empty slot: no equivalent state exists yet
    }

    if (are_equal(node_id, unit_id)) {
      return unit_id;
    }
  }
  return 0;
}
|
1290 |
+
|
1291 |
+
// Compares the sibling group headed by node `node_id' against the unit run
// starting at `unit_id'. Nodes chain newest-first while units are stored in
// ascending order, so the unit index first advances to the run's end and
// then walks backwards alongside the node chain.
inline bool DawgBuilder::are_equal(id_type node_id, id_type unit_id) const {
  // Advance unit_id past one unit per extra sibling; the run must have at
  // least as many members as the node group.
  for (id_type i = nodes_[node_id].sibling(); i != 0;
      i = nodes_[i].sibling()) {
    if (units_[unit_id].has_sibling() == false) {
      return false;  // unit run is shorter than the node group
    }
    ++unit_id;
  }
  if (units_[unit_id].has_sibling() == true) {
    return false;  // unit run is longer than the node group
  }

  // Element-wise comparison, walking units backwards and nodes forwards.
  for (id_type i = node_id; i != 0; i = nodes_[i].sibling(), --unit_id) {
    if (nodes_[i].unit() != units_[unit_id].unit() ||
        nodes_[i].label() != labels_[unit_id]) {
      return false;
    }
  }
  return true;
}
|
1311 |
+
|
1312 |
+
// Hashes a finalized unit run starting at `id': XOR-combines the mixed
// (label, unit) pair of each member until a unit without a sibling ends
// the run. Order-insensitive by construction (XOR), matching hash_node().
inline id_type DawgBuilder::hash_unit(id_type id) const {
  id_type hash_value = 0;
  for ( ; id != 0; ++id) {
    id_type unit = units_[id].unit();
    uchar_type label = labels_[id];
    hash_value ^= hash((label << 24) ^ unit);

    if (units_[id].has_sibling() == false) {
      break;  // last member of the run
    }
  }
  return hash_value;
}
|
1325 |
+
|
1326 |
+
// Hashes a pending sibling group headed by node `id', combining members the
// same way as hash_unit() so equal groups hash identically.
inline id_type DawgBuilder::hash_node(id_type id) const {
  id_type hash_value = 0;
  for ( ; id != 0; id = nodes_[id].sibling()) {
    id_type unit = nodes_[id].unit();
    uchar_type label = nodes_[id].label();
    hash_value ^= hash((label << 24) ^ unit);
  }
  return hash_value;
}
|
1335 |
+
|
1336 |
+
// Appends one finalized unit slot (unit, label, and intersection bit stay
// in lockstep) and returns its id.
inline id_type DawgBuilder::append_unit() {
  is_intersections_.append();
  units_.append();
  labels_.append();

  return static_cast<id_type>(is_intersections_.size() - 1);
}
|
1343 |
+
|
1344 |
+
// Allocates a fresh construction node, preferring a recycled id from the
// bin (reset to a default DawgNode) over growing the pool.
inline id_type DawgBuilder::append_node() {
  id_type id;
  if (!recycle_bin_.empty()) {
    // Reuse a previously freed slot after wiping its old contents.
    id = recycle_bin_.top();
    nodes_[id] = DawgNode();
    recycle_bin_.pop();
  } else {
    id = static_cast<id_type>(nodes_.size());
    nodes_.append();
  }
  return id;
}
|
1356 |
+
|
1357 |
+
//
|
1358 |
+
// Unit of double-array builder.
|
1359 |
+
//
|
1360 |
+
|
1361 |
+
// Mutable 32-bit unit used while building the double-array.
// Bit layout (as established by the setters below):
//   bit 31      : set by set_value() to mark a value (leaf) unit
//   bits 10..30 : offset, when offset < 2^21 (bit 9 clear)
//   bits  2..31 : offset << 2, when offset >= 2^21 (bit 9 set)
//   bit  9      : "extended offset" flag
//   bit  8      : has_leaf flag
//   bits  0..7  : transition label
class DoubleArrayBuilderUnit {
 public:
  DoubleArrayBuilderUnit() : unit_(0) {}

  // Sets or clears the has-leaf flag (bit 8) without touching other bits.
  void set_has_leaf(bool has_leaf) {
    if (has_leaf) {
      unit_ |= 1U << 8;
    } else {
      unit_ &= ~(1U << 8);
    }
  }
  // Overwrites the whole unit with a value plus the leaf marker (bit 31).
  void set_value(value_type value) {
    unit_ = value | (1U << 31);
  }
  // Replaces the label byte (bits 0..7), preserving all other bits.
  void set_label(uchar_type label) {
    unit_ = (unit_ & ~0xFFU) | label;
  }
  // Stores the child offset. Offsets must fit in 29 bits; small offsets
  // (< 2^21) are stored shifted by 10, larger ones shifted by 2 with the
  // extension flag (bit 9) set so readers know how to decode them.
  void set_offset(id_type offset) {
    if (offset >= 1U << 29) {
      DARTS_THROW("failed to modify unit: too large offset");
    }
    // Keep only the leaf marker, has_leaf flag and label; clear old offset.
    unit_ &= (1U << 31) | (1U << 8) | 0xFF;
    if (offset < 1U << 21) {
      unit_ |= (offset << 10);
    } else {
      unit_ |= (offset << 2) | (1U << 9);
    }
  }

 private:
  id_type unit_;

  // Copyable.
};
|
1395 |
+
|
1396 |
+
//
|
1397 |
+
// Extra unit of double-array builder.
|
1398 |
+
//
|
1399 |
+
|
1400 |
+
class DoubleArrayBuilderExtraUnit {
|
1401 |
+
public:
|
1402 |
+
DoubleArrayBuilderExtraUnit() : prev_(0), next_(0),
|
1403 |
+
is_fixed_(false), is_used_(false) {}
|
1404 |
+
|
1405 |
+
void set_prev(id_type prev) {
|
1406 |
+
prev_ = prev;
|
1407 |
+
}
|
1408 |
+
void set_next(id_type next) {
|
1409 |
+
next_ = next;
|
1410 |
+
}
|
1411 |
+
void set_is_fixed(bool is_fixed) {
|
1412 |
+
is_fixed_ = is_fixed;
|
1413 |
+
}
|
1414 |
+
void set_is_used(bool is_used) {
|
1415 |
+
is_used_ = is_used;
|
1416 |
+
}
|
1417 |
+
|
1418 |
+
id_type prev() const {
|
1419 |
+
return prev_;
|
1420 |
+
}
|
1421 |
+
id_type next() const {
|
1422 |
+
return next_;
|
1423 |
+
}
|
1424 |
+
bool is_fixed() const {
|
1425 |
+
return is_fixed_;
|
1426 |
+
}
|
1427 |
+
bool is_used() const {
|
1428 |
+
return is_used_;
|
1429 |
+
}
|
1430 |
+
|
1431 |
+
private:
|
1432 |
+
id_type prev_;
|
1433 |
+
id_type next_;
|
1434 |
+
bool is_fixed_;
|
1435 |
+
bool is_used_;
|
1436 |
+
|
1437 |
+
// Copyable.
|
1438 |
+
};
|
1439 |
+
|
1440 |
+
//
|
1441 |
+
// DAWG -> double-array converter.
|
1442 |
+
//
|
1443 |
+
|
1444 |
+
// Converts a keyset (directly, or via an intermediate DAWG when values are
// present) into the final double-array representation.
class DoubleArrayBuilder {
 public:
  // |progress_func| may be NULL; when set, it is invoked during the build
  // with (current, total) progress counts.
  explicit DoubleArrayBuilder(progress_func_type progress_func)
      : progress_func_(progress_func), units_(), extras_(), labels_(),
        table_(), extras_head_(0) {}
  ~DoubleArrayBuilder() {
    clear();
  }

  // Builds the double-array from |keyset|.
  template <typename T>
  void build(const Keyset<T> &keyset);
  // Copies the finished unit array into a freshly allocated buffer.
  void copy(std::size_t *size_ptr, DoubleArrayUnit **buf_ptr) const;

  // Releases all working storage.
  void clear();

 private:
  enum { BLOCK_SIZE = 256 };        // units per block
  enum { NUM_EXTRA_BLOCKS = 16 };   // blocks kept "open" with extra records
  enum { NUM_EXTRAS = BLOCK_SIZE * NUM_EXTRA_BLOCKS };

  // Masks used to test whether an XOR-relative offset is representable.
  enum { UPPER_MASK = 0xFF << 21 };
  enum { LOWER_MASK = 0xFF };

  typedef DoubleArrayBuilderUnit unit_type;
  typedef DoubleArrayBuilderExtraUnit extra_type;

  progress_func_type progress_func_;
  AutoPool<unit_type> units_;       // the double-array under construction
  AutoArray<extra_type> extras_;    // ring buffer of per-unit build records
  AutoPool<uchar_type> labels_;     // scratch: child labels of current node
  AutoArray<id_type> table_;        // DAWG intersection id -> chosen offset
  id_type extras_head_;             // head of the circular unfixed-unit list

  // Disallows copy and assignment.
  DoubleArrayBuilder(const DoubleArrayBuilder &);
  DoubleArrayBuilder &operator=(const DoubleArrayBuilder &);

  std::size_t num_blocks() const {
    return units_.size() / BLOCK_SIZE;
  }

  // extras_ only holds records for the most recent NUM_EXTRAS units, so
  // lookups wrap modulo NUM_EXTRAS.
  const extra_type &extras(id_type id) const {
    return extras_[id % NUM_EXTRAS];
  }
  extra_type &extras(id_type id) {
    return extras_[id % NUM_EXTRAS];
  }

  // DAWG-based construction path (used when the keyset has values).
  template <typename T>
  void build_dawg(const Keyset<T> &keyset, DawgBuilder *dawg_builder);
  void build_from_dawg(const DawgBuilder &dawg);
  void build_from_dawg(const DawgBuilder &dawg,
      id_type dawg_id, id_type dic_id);
  id_type arrange_from_dawg(const DawgBuilder &dawg,
      id_type dawg_id, id_type dic_id);

  // Direct construction path (used when the keyset has no values).
  template <typename T>
  void build_from_keyset(const Keyset<T> &keyset);
  template <typename T>
  void build_from_keyset(const Keyset<T> &keyset, std::size_t begin,
      std::size_t end, std::size_t depth, id_type dic_id);
  template <typename T>
  id_type arrange_from_keyset(const Keyset<T> &keyset, std::size_t begin,
      std::size_t end, std::size_t depth, id_type dic_id);

  // Offset search over the unfixed-unit free list.
  id_type find_valid_offset(id_type id) const;
  bool is_valid_offset(id_type id, id_type offset) const;

  // Marks |id| as fixed, removing it from the free list.
  void reserve_id(id_type id);
  // Grows the unit array by one block and splices its extras into the list.
  void expand_units();

  // Finalization of remaining open blocks.
  void fix_all_blocks();
  void fix_block(id_type block_id);
};
|
1518 |
+
|
1519 |
+
// Entry point: builds the double-array from |keyset|. Keysets with explicit
// values are routed through an intermediate DAWG (which merges shared
// suffixes); valueless keysets are converted directly.
template <typename T>
void DoubleArrayBuilder::build(const Keyset<T> &keyset) {
  if (keyset.has_values()) {
    Details::DawgBuilder dawg_builder;
    build_dawg(keyset, &dawg_builder);
    build_from_dawg(dawg_builder);
    // The DAWG is no longer needed once the double-array is built.
    dawg_builder.clear();
  } else {
    build_from_keyset(keyset);
  }
}
|
1530 |
+
|
1531 |
+
// Copies the finished unit array out to the caller. Either pointer may be
// NULL to skip that output. On success *buf_ptr owns a new[]-allocated
// array of units_.size() elements; the caller is responsible for delete[].
inline void DoubleArrayBuilder::copy(std::size_t *size_ptr,
    DoubleArrayUnit **buf_ptr) const {
  if (size_ptr != NULL) {
    *size_ptr = units_.size();
  }
  if (buf_ptr != NULL) {
    *buf_ptr = new DoubleArrayUnit[units_.size()];
    // unit_type (builder unit) and DoubleArrayUnit are assumed to share the
    // same 32-bit representation, so the builder units can be copied through
    // a reinterpreted pointer. NOTE(review): this relies on layout
    // compatibility between the two types — confirmed only by convention
    // elsewhere in this header.
    unit_type *units = reinterpret_cast<unit_type *>(*buf_ptr);
    for (std::size_t i = 0; i < units_.size(); ++i) {
      units[i] = units_[i];
    }
  }
}
|
1544 |
+
|
1545 |
+
// Releases every working buffer and resets the free-list head so the
// builder can be reused or destroyed cleanly.
inline void DoubleArrayBuilder::clear() {
  units_.clear();
  extras_.clear();
  labels_.clear();
  table_.clear();
  extras_head_ = 0;
}
|
1552 |
+
|
1553 |
+
// Feeds every key of |keyset| into |dawg_builder| in order, reporting
// progress after each insertion, then finalizes the DAWG.
template <typename T>
void DoubleArrayBuilder::build_dawg(const Keyset<T> &keyset,
    DawgBuilder *dawg_builder) {
  dawg_builder->init();
  for (std::size_t i = 0; i < keyset.num_keys(); ++i) {
    dawg_builder->insert(keyset.keys(i), keyset.lengths(i), keyset.values(i));
    if (progress_func_ != NULL) {
      // Total is num_keys() + 1: the final slot is reported after the
      // whole build completes (see DoubleArrayImpl::build).
      progress_func_(i + 1, keyset.num_keys() + 1);
    }
  }
  dawg_builder->finish();
}
|
1565 |
+
|
1566 |
+
// Converts a finished DAWG into the double-array. Sets up working buffers,
// initializes the root unit, runs the recursive conversion, then fixes the
// remaining open blocks and frees the scratch storage.
inline void DoubleArrayBuilder::build_from_dawg(const DawgBuilder &dawg) {
  // Reserve capacity as the next power of two >= dawg.size() to limit
  // reallocations of the unit pool.
  std::size_t num_units = 1;
  while (num_units < dawg.size()) {
    num_units <<= 1;
  }
  units_.reserve(num_units);

  // table_ memoizes the offset chosen for each DAWG intersection node so
  // shared subtrees can reuse a single double-array region.
  table_.reset(new id_type[dawg.num_intersections()]);
  for (std::size_t i = 0; i < dawg.num_intersections(); ++i) {
    table_[i] = 0;  // 0 means "no offset assigned yet"
  }

  extras_.reset(new extra_type[NUM_EXTRAS]);

  // Fix the root unit (id 0) and give it a dummy offset/label.
  reserve_id(0);
  extras(0).set_is_used(true);
  units_[0].set_offset(1);
  units_[0].set_label('\0');

  // Only recurse if the root actually has children (non-empty keyset).
  if (dawg.child(dawg.root()) != 0) {
    build_from_dawg(dawg, dawg.root(), 0);
  }

  fix_all_blocks();

  // Scratch buffers are no longer needed; only units_ survives.
  extras_.clear();
  labels_.clear();
  table_.clear();
}
|
1595 |
+
|
1596 |
+
// Recursively converts the subtree of DAWG node |dawg_id| into double-array
// units rooted at |dic_id|. Intersection nodes (shared subtrees) try to
// reuse a previously assigned offset before arranging a new one.
inline void DoubleArrayBuilder::build_from_dawg(const DawgBuilder &dawg,
    id_type dawg_id, id_type dic_id) {
  id_type dawg_child_id = dawg.child(dawg_id);
  if (dawg.is_intersection(dawg_child_id)) {
    id_type intersection_id = dawg.intersection_id(dawg_child_id);
    id_type offset = table_[intersection_id];
    if (offset != 0) {
      // A region was already built for this shared subtree. Reuse it if the
      // XOR-relative offset from dic_id is encodable (fits the unit's
      // offset field: low byte or high bits must be clear).
      offset ^= dic_id;
      if (!(offset & UPPER_MASK) || !(offset & LOWER_MASK)) {
        if (dawg.is_leaf(dawg_child_id)) {
          units_[dic_id].set_has_leaf(true);
        }
        units_[dic_id].set_offset(offset);
        return;  // subtree shared; nothing more to build here
      }
    }
  }

  // No reusable region: lay out this node's children and remember the
  // chosen offset for future intersection reuse.
  id_type offset = arrange_from_dawg(dawg, dawg_id, dic_id);
  if (dawg.is_intersection(dawg_child_id)) {
    table_[dawg.intersection_id(dawg_child_id)] = offset;
  }

  // Recurse into each non-terminal child ('\0' labels terminate keys).
  do {
    uchar_type child_label = dawg.label(dawg_child_id);
    id_type dic_child_id = offset ^ child_label;
    if (child_label != '\0') {
      build_from_dawg(dawg, dawg_child_id, dic_child_id);
    }
    dawg_child_id = dawg.sibling(dawg_child_id);
  } while (dawg_child_id != 0);
}
|
1628 |
+
|
1629 |
+
// Places the children of DAWG node |dawg_id| into the double-array: picks
// an offset where all child slots (offset ^ label) are free, reserves those
// slots, and initializes each child unit. Returns the chosen offset.
inline id_type DoubleArrayBuilder::arrange_from_dawg(const DawgBuilder &dawg,
    id_type dawg_id, id_type dic_id) {
  // First pass: collect the child labels into the scratch buffer.
  labels_.resize(0);

  id_type dawg_child_id = dawg.child(dawg_id);
  while (dawg_child_id != 0) {
    labels_.append(dawg.label(dawg_child_id));
    dawg_child_id = dawg.sibling(dawg_child_id);
  }

  // Find an offset compatible with every collected label and record it
  // (XOR-relative) in the parent unit.
  id_type offset = find_valid_offset(dic_id);
  units_[dic_id].set_offset(dic_id ^ offset);

  // Second pass: reserve and initialize each child slot.
  dawg_child_id = dawg.child(dawg_id);
  for (std::size_t i = 0; i < labels_.size(); ++i) {
    id_type dic_child_id = offset ^ labels_[i];
    reserve_id(dic_child_id);

    if (dawg.is_leaf(dawg_child_id)) {
      // Leaf children store the key's value directly.
      units_[dic_id].set_has_leaf(true);
      units_[dic_child_id].set_value(dawg.value(dawg_child_id));
    } else {
      units_[dic_child_id].set_label(labels_[i]);
    }

    dawg_child_id = dawg.sibling(dawg_child_id);
  }
  // Mark the offset itself as consumed so no other node picks it.
  extras(offset).set_is_used(true);

  return offset;
}
|
1660 |
+
|
1661 |
+
// Converts a (valueless) keyset directly into the double-array without an
// intermediate DAWG: sets up buffers, fixes the root, recurses over the
// sorted key range, then finalizes open blocks.
template <typename T>
void DoubleArrayBuilder::build_from_keyset(const Keyset<T> &keyset) {
  // Reserve capacity as the next power of two >= num_keys() to limit
  // reallocations of the unit pool.
  std::size_t num_units = 1;
  while (num_units < keyset.num_keys()) {
    num_units <<= 1;
  }
  units_.reserve(num_units);

  extras_.reset(new extra_type[NUM_EXTRAS]);

  // Fix the root unit (id 0) and give it a dummy offset/label.
  reserve_id(0);
  extras(0).set_is_used(true);
  units_[0].set_offset(1);
  units_[0].set_label('\0');

  if (keyset.num_keys() > 0) {
    build_from_keyset(keyset, 0, keyset.num_keys(), 0, 0);
  }

  fix_all_blocks();

  // Scratch buffers are no longer needed; only units_ survives.
  extras_.clear();
  labels_.clear();
}
|
1685 |
+
|
1686 |
+
// Recursively lays out the trie node for keys [begin, end) at character
// position |depth|, rooted at double-array unit |dic_id|. Keys are assumed
// sorted, so keys sharing a label at |depth| form contiguous sub-ranges.
template <typename T>
void DoubleArrayBuilder::build_from_keyset(const Keyset<T> &keyset,
    std::size_t begin, std::size_t end, std::size_t depth, id_type dic_id) {
  id_type offset = arrange_from_keyset(keyset, begin, end, depth, dic_id);

  // Skip keys that terminate here ('\0' at this depth); they were handled
  // as leaf units by arrange_from_keyset.
  while (begin < end) {
    if (keyset.keys(begin, depth) != '\0') {
      break;
    }
    ++begin;
  }
  if (begin == end) {
    return;  // all keys in this range ended at this node
  }

  // Walk the remaining range, recursing once per distinct label run.
  std::size_t last_begin = begin;
  uchar_type last_label = keyset.keys(begin, depth);
  while (++begin < end) {
    uchar_type label = keyset.keys(begin, depth);
    if (label != last_label) {
      build_from_keyset(keyset, last_begin, begin,
          depth + 1, offset ^ last_label);
      last_begin = begin;
      last_label = keyset.keys(begin, depth);
    }
  }
  // Final run up to |end|.
  build_from_keyset(keyset, last_begin, end, depth + 1, offset ^ last_label);
}
|
1714 |
+
|
1715 |
+
// Places the children of the trie node covering keys [begin, end) at depth
// |depth|: collects the distinct labels (validating key order and values),
// picks a compatible offset, reserves the child slots and initializes them.
// Returns the chosen offset.
template <typename T>
id_type DoubleArrayBuilder::arrange_from_keyset(const Keyset<T> &keyset,
    std::size_t begin, std::size_t end, std::size_t depth, id_type dic_id) {
  labels_.resize(0);

  // Value attached to keys terminating at this node; -1 means "none yet".
  value_type value = -1;
  for (std::size_t i = begin; i < end; ++i) {
    uchar_type label = keyset.keys(i, depth);
    if (label == '\0') {
      // A terminator inside a key (before lengths(i)) is invalid input.
      if (keyset.has_lengths() && depth < keyset.lengths(i)) {
        DARTS_THROW("failed to build double-array: "
            "invalid null character");
      } else if (keyset.values(i) < 0) {
        DARTS_THROW("failed to build double-array: negative value");
      }

      // For duplicate keys, the first occurrence's value wins.
      if (value == -1) {
        value = keyset.values(i);
      }
      if (progress_func_ != NULL) {
        progress_func_(i + 1, keyset.num_keys() + 1);
      }
    }

    // Collect each distinct label once; sorted input means a repeated or
    // out-of-order label is detectable by comparing with the last appended.
    if (labels_.empty()) {
      labels_.append(label);
    } else if (label != labels_[labels_.size() - 1]) {
      if (label < labels_[labels_.size() - 1]) {
        DARTS_THROW("failed to build double-array: wrong key order");
      }
      labels_.append(label);
    }
  }

  // Find an offset compatible with all labels and record it (XOR-relative)
  // in the parent unit.
  id_type offset = find_valid_offset(dic_id);
  units_[dic_id].set_offset(dic_id ^ offset);

  // Reserve and initialize one child slot per distinct label.
  for (std::size_t i = 0; i < labels_.size(); ++i) {
    id_type dic_child_id = offset ^ labels_[i];
    reserve_id(dic_child_id);
    if (labels_[i] == '\0') {
      // Terminator child: store the value and flag the parent.
      units_[dic_id].set_has_leaf(true);
      units_[dic_child_id].set_value(value);
    } else {
      units_[dic_child_id].set_label(labels_[i]);
    }
  }
  // Mark the offset itself as consumed so no other node picks it.
  extras(offset).set_is_used(true);

  return offset;
}
|
1766 |
+
|
1767 |
+
// Searches the circular list of unfixed units for an offset at which every
// child slot (offset ^ label) of unit |id| is available. Falls back to an
// offset just past the current array end (which expand_units will cover).
inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
  // Free list empty (head past the end): append at the end. ORing the low
  // byte of |id| keeps the XOR-relative offset encodable.
  if (extras_head_ >= units_.size()) {
    return units_.size() | (id & LOWER_MASK);
  }

  // Try each unfixed unit as the slot for the first label.
  id_type unfixed_id = extras_head_;
  do {
    id_type offset = unfixed_id ^ labels_[0];
    if (is_valid_offset(id, offset)) {
      return offset;
    }
    unfixed_id = extras(unfixed_id).next();
  } while (unfixed_id != extras_head_);

  // No existing slot works: append at the end.
  return units_.size() | (id & LOWER_MASK);
}
|
1783 |
+
|
1784 |
+
// Checks whether |offset| can serve as the child base for unit |id|:
// the offset must be unclaimed, the XOR-relative offset must be encodable
// in a unit's offset field, and every child slot must still be unfixed.
inline bool DoubleArrayBuilder::is_valid_offset(id_type id,
    id_type offset) const {
  if (extras(offset).is_used()) {
    return false;  // some other node already owns this offset
  }

  // The stored offset is id ^ offset; it is only encodable when the low
  // byte or the high bits are clear (see DoubleArrayBuilderUnit layout).
  id_type rel_offset = id ^ offset;
  if ((rel_offset & LOWER_MASK) && (rel_offset & UPPER_MASK)) {
    return false;
  }

  // labels_[0]'s slot is the unfixed unit we started from, so start at 1.
  for (std::size_t i = 1; i < labels_.size(); ++i) {
    if (extras(offset ^ labels_[i]).is_fixed()) {
      return false;  // a required child slot is already taken
    }
  }

  return true;
}
|
1803 |
+
|
1804 |
+
// Fixes unit |id|: grows the array if needed, advances the free-list head
// past |id| if necessary, then unlinks |id| from the circular list of
// unfixed units and marks it fixed.
inline void DoubleArrayBuilder::reserve_id(id_type id) {
  if (id >= units_.size()) {
    expand_units();
  }

  if (id == extras_head_) {
    extras_head_ = extras(id).next();
    // next() == id means |id| was the only element; an out-of-range head
    // signals an empty free list (see find_valid_offset).
    if (extras_head_ == id) {
      extras_head_ = units_.size();
    }
  }
  // Unlink |id| from the circular doubly-linked free list.
  extras(extras(id).prev()).set_next(extras(id).next());
  extras(extras(id).next()).set_prev(extras(id).prev());
  extras(id).set_is_fixed(true);
}
|
1819 |
+
|
1820 |
+
// Grows the unit array by one block. Because extras_ is a fixed ring of
// NUM_EXTRAS records, opening a new block first finalizes the block whose
// records are about to be recycled, then links the new block's units into
// the circular free list.
inline void DoubleArrayBuilder::expand_units() {
  id_type src_num_units = units_.size();
  id_type src_num_blocks = num_blocks();

  id_type dest_num_units = src_num_units + BLOCK_SIZE;
  id_type dest_num_blocks = src_num_blocks + 1;

  // The ring only tracks the last NUM_EXTRA_BLOCKS blocks: finalize the
  // block that will be overwritten by the new block's records.
  if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
    fix_block(src_num_blocks - NUM_EXTRA_BLOCKS);
  }

  units_.resize(dest_num_units);

  // Reset the recycled extra records for the new block's units.
  if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
    for (std::size_t id = src_num_units; id < dest_num_units; ++id) {
      extras(id).set_is_used(false);
      extras(id).set_is_fixed(false);
    }
  }

  // Chain the new units together into a doubly-linked segment.
  for (id_type i = src_num_units + 1; i < dest_num_units; ++i) {
    extras(i - 1).set_next(i);
    extras(i).set_prev(i - 1);
  }

  // Close the segment into a ring...
  extras(src_num_units).set_prev(dest_num_units - 1);
  extras(dest_num_units - 1).set_next(src_num_units);

  // ...then splice it into the existing free list just before the head.
  extras(src_num_units).set_prev(extras(extras_head_).prev());
  extras(dest_num_units - 1).set_next(extras_head_);

  extras(extras(extras_head_).prev()).set_next(src_num_units);
  extras(extras_head_).set_prev(dest_num_units - 1);
}
|
1854 |
+
|
1855 |
+
inline void DoubleArrayBuilder::fix_all_blocks() {
|
1856 |
+
id_type begin = 0;
|
1857 |
+
if (num_blocks() > NUM_EXTRA_BLOCKS) {
|
1858 |
+
begin = num_blocks() - NUM_EXTRA_BLOCKS;
|
1859 |
+
}
|
1860 |
+
id_type end = num_blocks();
|
1861 |
+
|
1862 |
+
for (id_type block_id = begin; block_id != end; ++block_id) {
|
1863 |
+
fix_block(block_id);
|
1864 |
+
}
|
1865 |
+
}
|
1866 |
+
|
1867 |
+
// Finalizes one block: every still-unfixed unit in it is reserved and given
// a label that points it at an unused slot within the same block, so no
// dangling free-list entries remain once the block's extras are recycled.
inline void DoubleArrayBuilder::fix_block(id_type block_id) {
  id_type begin = block_id * BLOCK_SIZE;
  id_type end = begin + BLOCK_SIZE;

  // Find one unused slot in the block (0 if the block is fully used).
  id_type unused_offset = 0;
  for (id_type offset = begin; offset != end; ++offset) {
    if (!extras(offset).is_used()) {
      unused_offset = offset;
      break;
    }
  }

  // Reserve every remaining unfixed unit; its label is the low byte of the
  // XOR distance to the unused slot.
  for (id_type id = begin; id != end; ++id) {
    if (!extras(id).is_fixed()) {
      reserve_id(id);
      units_[id].set_label(static_cast<uchar_type>(id ^ unused_offset));
    }
  }
}
|
1886 |
+
|
1887 |
+
} // namespace Details
|
1888 |
+
|
1889 |
+
//
|
1890 |
+
// Member function build() of DoubleArrayImpl.
|
1891 |
+
//
|
1892 |
+
|
1893 |
+
// Builds the dictionary from |num_keys| sorted keys (with optional lengths
// and values), replacing any previously held array. Returns 0 on success;
// build failures are reported via DARTS_THROW from the builder.
template <typename A, typename B, typename T, typename C>
int DoubleArrayImpl<A, B, T, C>::build(std::size_t num_keys,
    const key_type * const *keys, const std::size_t *lengths,
    const value_type *values, Details::progress_func_type progress_func) {
  Details::Keyset<value_type> keyset(num_keys, keys, lengths, values);

  Details::DoubleArrayBuilder builder(progress_func);
  builder.build(keyset);

  // Extract the finished unit array into a heap buffer we will own.
  std::size_t size = 0;
  unit_type *buf = NULL;
  builder.copy(&size, &buf);

  // Release any previous dictionary only after the new one is ready.
  clear();

  size_ = size;
  array_ = buf;
  buf_ = buf;  // buf_ marks the array as owned (freed by clear())

  // Report the final progress step (totals elsewhere use num_keys + 1).
  if (progress_func != NULL) {
    progress_func(num_keys + 1, num_keys + 1);
  }

  return 0;
}
|
1918 |
+
|
1919 |
+
} // namespace Darts
|
1920 |
+
|
1921 |
+
#undef DARTS_INT_TO_STR
|
1922 |
+
#undef DARTS_LINE_TO_STR
|
1923 |
+
#undef DARTS_LINE_STR
|
1924 |
+
#undef DARTS_THROW
|
1925 |
+
|
1926 |
+
#endif // DARTS_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/LICENSE
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
This is the esaxx copyright.
|
2 |
+
|
3 |
+
Copyright (c) 2010 Daisuke Okanohara All Rights Reserved.
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person
|
6 |
+
obtaining a copy of this software and associated documentation
|
7 |
+
files (the "Software"), to deal in the Software without
|
8 |
+
restriction, including without limitation the rights to use,
|
9 |
+
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
+
copies of the Software, and to permit persons to whom the
|
11 |
+
Software is furnished to do so, subject to the following
|
12 |
+
conditions:
|
13 |
+
|
14 |
+
The above copyright notice and this permission notice shall be
|
15 |
+
included in all copies or substantial portions of the Software.
|
16 |
+
|
17 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
18 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
19 |
+
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
20 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
21 |
+
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
22 |
+
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
23 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
24 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/esa.hxx
ADDED
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/*
|
2 |
+
* esa.hxx
|
3 |
+
* Copyright (c) 2010 Daisuke Okanohara All Rights Reserved.
|
4 |
+
*
|
5 |
+
* Permission is hereby granted, free of charge, to any person
|
6 |
+
* obtaining a copy of this software and associated documentation
|
7 |
+
* files (the "Software"), to deal in the Software without
|
8 |
+
* restriction, including without limitation the rights to use,
|
9 |
+
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
+
* copies of the Software, and to permit persons to whom the
|
11 |
+
* Software is furnished to do so, subject to the following
|
12 |
+
* conditions:
|
13 |
+
*
|
14 |
+
* The above copyright notice and this permission notice shall be
|
15 |
+
* included in all copies or substantial portions of the Software.
|
16 |
+
*
|
17 |
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
18 |
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
19 |
+
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
20 |
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
21 |
+
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
22 |
+
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
23 |
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
24 |
+
* OTHER DEALINGS IN THE SOFTWARE.
|
25 |
+
*/
|
26 |
+
|
27 |
+
#ifndef _ESA_HXX
|
28 |
+
#define _ESA_HXX
|
29 |
+
|
30 |
+
#include <vector>
|
31 |
+
#include <utility>
|
32 |
+
#include <cassert>
|
33 |
+
#include "sais.hxx"
|
34 |
+
|
35 |
+
namespace esaxx_private {
|
36 |
+
// Enumerates the internal nodes of the suffix tree of T from its suffix
// array SA. Outputs triples (L[i], R[i], D[i]) — suffix-array interval and
// node depth — and returns the number of internal nodes. L, R and D are
// also used as scratch space (Psi, PLCP, H) before the final pass.
template<typename string_type, typename sarray_type, typename index_type>
index_type suffixtree(string_type T, sarray_type SA, sarray_type L, sarray_type R, sarray_type D, index_type n){
  if (n == 0){
    return 0;
  }
  // Psi[SA[i]] = SA[i-1]: the text position of the lexicographic
  // predecessor suffix (stored in L's memory).
  sarray_type Psi = L;
  Psi[SA[0]] = SA[n-1];
  for (index_type i = 1; i < n; ++i){
    Psi[SA[i]] = SA[i-1];
  }

  // Compare at most 2n log n characters. Practically fastest
  // "Permuted Longest-Common-Prefix Array", Juha Karkkainen, CPM 09
  sarray_type PLCP = R;  // PLCP in text order (stored in R's memory)
  index_type h = 0;
  for (index_type i = 0; i < n; ++i){
    index_type j = Psi[i];
    while (i+h < n && j+h < n &&
	   T[i+h] == T[j+h]){
      ++h;
    }
    PLCP[i] = h;
    // PLCP can drop by at most one per text position, so reuse h - 1.
    if (h > 0) --h;
  }

  // Height (LCP) array in suffix-array order (reuses L's memory; Psi is
  // no longer needed at this point).
  sarray_type H = L;
  for (index_type i = 0; i < n; ++i){
    H[i] = PLCP[SA[i]];
  }
  H[0] = -1;

  // Standard stack-based traversal of the LCP interval tree: each stack
  // entry is (left boundary, lcp depth). A sentinel (-1, -1) guarantees
  // the stack is never emptied.
  std::vector<std::pair<index_type, index_type> > S;
  S.push_back(std::make_pair((index_type)-1, (index_type)-1));
  size_t nodeNum = 0;
  for (index_type i = 0; ; ++i){
    // One virtual past-the-end entry (depth -1) flushes the stack at i == n.
    std::pair<index_type, index_type> cur (i, (i == n) ? -1 : H[i]);
    std::pair<index_type, index_type> cand(S.back());
    while (cand.second > cur.second){
      // Pop a completed interval; emit it only if it spans > 1 suffix
      // (single-suffix intervals are leaves, not internal nodes).
      if (i - cand.first > 1){
	L[nodeNum] = cand.first;
	R[nodeNum] = i;
	D[nodeNum] = cand.second;
	++nodeNum;
      }
      cur.first = cand.first;
      S.pop_back();
      cand = S.back();
    }
    if (cand.second < cur.second){
      S.push_back(cur);
    }
    if (i == n) break;
    // Push the leaf interval for suffix SA[i] (depth = suffix length + 1).
    S.push_back(std::make_pair(i, n - SA[i] + 1));
  }
  return nodeNum;
}
|
92 |
+
}
|
93 |
+
|
94 |
+
/**
|
95 |
+
* @brief Build an enhanced suffix array of a given string in linear time
|
96 |
+
* For an input text T, esaxx() builds an enhancd suffix array in linear time.
|
97 |
+
* i-th internal node is represented as a triple (L[i], R[i], D[i]);
|
98 |
+
* L[i] and R[i] is the left/right boundary of the suffix array as SA[L[i]....R[i]-1]
|
99 |
+
* D[i] is the depth of the internal node
|
100 |
+
* The number of internal node is at most N-1 and return the actual number by
|
101 |
+
* @param T[0...n-1] The input string. (random access iterator)
|
102 |
+
* @param SA[0...n-1] The output suffix array (random access iterator)
|
103 |
+
* @param L[0...n-1] The output left boundary of internal node (random access iterator)
|
104 |
+
* @param R[0...n-1] The output right boundary of internal node (random access iterator)
|
105 |
+
* @param D[0...n-1] The output depth of internal node (random access iterator)
|
106 |
+
* @param n The length of the input string
|
107 |
+
* @param k The alphabet size
|
108 |
+
* @pram nodeNum The output the number of internal node
|
109 |
+
* @return 0 if succeded, -1 or -2 otherwise
|
110 |
+
*/
|
111 |
+
|
112 |
+
template<typename string_type, typename sarray_type, typename index_type>
|
113 |
+
int esaxx(string_type T, sarray_type SA, sarray_type L, sarray_type R, sarray_type D,
|
114 |
+
index_type n, index_type k, index_type& nodeNum) {
|
115 |
+
if ((n < 0) || (k <= 0)) return -1;
|
116 |
+
int err = saisxx(T, SA, n, k);
|
117 |
+
if (err != 0){
|
118 |
+
return err;
|
119 |
+
}
|
120 |
+
nodeNum = esaxx_private::suffixtree(T, SA, L, R, D, n);
|
121 |
+
return 0;
|
122 |
+
}
|
123 |
+
|
124 |
+
|
125 |
+
#endif // _ESA_HXX
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/esaxx/sais.hxx
ADDED
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/*
|
2 |
+
* sais.hxx for sais-lite
|
3 |
+
* Copyright (c) 2008-2009 Yuta Mori All Rights Reserved.
|
4 |
+
*
|
5 |
+
* Permission is hereby granted, free of charge, to any person
|
6 |
+
* obtaining a copy of this software and associated documentation
|
7 |
+
* files (the "Software"), to deal in the Software without
|
8 |
+
* restriction, including without limitation the rights to use,
|
9 |
+
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
10 |
+
* copies of the Software, and to permit persons to whom the
|
11 |
+
* Software is furnished to do so, subject to the following
|
12 |
+
* conditions:
|
13 |
+
*
|
14 |
+
* The above copyright notice and this permission notice shall be
|
15 |
+
* included in all copies or substantial portions of the Software.
|
16 |
+
*
|
17 |
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
18 |
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
19 |
+
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
20 |
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
21 |
+
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
22 |
+
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
23 |
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
24 |
+
* OTHER DEALINGS IN THE SOFTWARE.
|
25 |
+
*/
|
26 |
+
|
27 |
+
#ifndef _SAIS_HXX
|
28 |
+
#define _SAIS_HXX 1
|
29 |
+
#ifdef __cplusplus
|
30 |
+
|
31 |
+
#ifdef __INTEL_COMPILER
|
32 |
+
#pragma warning(disable : 383 981 1418)
|
33 |
+
// for icc 64-bit
|
34 |
+
//#define __builtin_vsnprintf(a, b, c, d) __builtin_vsnprintf(a, b, c, (char *)d)
|
35 |
+
#endif
|
36 |
+
|
37 |
+
#include <iterator>
|
38 |
+
#ifdef _OPENMP
|
39 |
+
# include <omp.h>
|
40 |
+
#endif
|
41 |
+
|
42 |
+
namespace saisxx_private {
|
43 |
+
|
44 |
+
/* find the start or end of each bucket */
|
45 |
+
/* Counts character occurrences of T[0..n-1] into C[0..k-1].
 * With OpenMP, each thread counts its slice into its own k-sized section of
 * C (assumes C has room for max_threads * k entries in that build — TODO
 * confirm against the callers' buffer allocation), then the per-thread
 * counts are reduced into C[0..k-1]. */
template<typename string_type, typename bucket_type, typename index_type>
void
getCounts(const string_type T, bucket_type C, index_type n, index_type k) {
#ifdef _OPENMP
  bucket_type D;
  index_type i, j, p, sum, first, last;
  int thnum, maxthreads = omp_get_max_threads();
#pragma omp parallel default(shared) private(D, i, thnum, first, last)
  {
    /* Each thread zeroes and fills its own k-wide counting section. */
    thnum = omp_get_thread_num();
    D = C + thnum * k;
    first = n / maxthreads * thnum;
    /* The last thread also takes the remainder of the division. */
    last = (thnum < (maxthreads - 1)) ? n / maxthreads * (thnum + 1) : n;
    for(i = 0; i < k; ++i) { D[i] = 0; }
    for(i = first; i < last; ++i) { ++D[T[i]]; }
  }
  if(1 < maxthreads) {
    /* Reduce per-thread sections into the first section C[0..k-1]. */
#pragma omp parallel for default(shared) private(i, j, p, sum)
    for(i = 0; i < k; ++i) {
      for(j = 1, p = i + k, sum = C[i]; j < maxthreads; ++j, p += k) {
        sum += C[p];
      }
      C[i] = sum;
    }
  }
#else
  /* Serial version: one plain counting pass. */
  index_type i;
  for(i = 0; i < k; ++i) { C[i] = 0; }
  for(i = 0; i < n; ++i) { ++C[T[i]]; }
#endif
}
|
76 |
+
/* Derives bucket boundaries from the character counts in C[0..k-1]:
 * end == true  -> B[i] is the exclusive end of bucket i (inclusive prefix sum)
 * end == false -> B[i] is the start of bucket i (exclusive prefix sum) */
template<typename bucket_type, typename index_type>
void
getBuckets(const bucket_type C, bucket_type B, index_type k, bool end) {
  index_type sum = 0;
  for(index_type i = 0; i < k; ++i) {
    if(end) {
      sum += C[i];
      B[i] = sum;   /* boundary after adding this bucket's count */
    } else {
      B[i] = sum;   /* boundary before adding this bucket's count */
      sum += C[i];
    }
  }
}
|
83 |
+
|
84 |
+
/* compute SA and BWT */
|
85 |
+
/* Induced sorting step of SA-IS: given the LMS suffixes already placed
   in SA, induce the order of all L-type suffixes (left-to-right pass
   from bucket starts) and then all S-type suffixes (right-to-left pass
   from bucket ends), completing the suffix array.
   C and B may be the same array; when they alias, counts are recomputed
   before each getBuckets call. Negative (~j) entries are used as
   transient type markers during the scans. */
template<typename string_type, typename sarray_type,
         typename bucket_type, typename index_type>
void
induceSA(string_type T, sarray_type SA, bucket_type C, bucket_type B,
         index_type n, index_type k) {
  typedef typename std::iterator_traits<string_type>::value_type char_type;
  sarray_type b;
  index_type i, j;
  char_type c0, c1;
  /* compute SAl */
  if(C == B) { getCounts(T, C, n, k); }
  getBuckets(C, B, k, false); /* find starts of buckets */
  /* Seed with the suffix starting at the last character. */
  b = SA + B[c1 = T[j = n - 1]];
  *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j;
  for(i = 0; i < n; ++i) {
    j = SA[i], SA[i] = ~j;
    if(0 < j) {
      /* Switch output cursor b when the bucket character changes. */
      if((c0 = T[--j]) != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; }
      /* Store ~j when the preceding suffix is S-type. */
      *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j;
    }
  }
  /* compute SAs */
  if(C == B) { getCounts(T, C, n, k); }
  getBuckets(C, B, k, true); /* find ends of buckets */
  for(i = n - 1, b = SA + B[c1 = 0]; 0 <= i; --i) {
    if(0 < (j = SA[i])) {
      if((c0 = T[--j]) != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; }
      *--b = ((j == 0) || (T[j - 1] > c1)) ? ~j : j;
    } else {
      /* Restore the entry flipped during the L-type pass. */
      SA[i] = ~j;
    }
  }
}
|
118 |
+
/* Variant of induceSA that produces the Burrows-Wheeler transform in SA
   (as character values) instead of the suffix array. Uses the same two
   induction passes; returns pidx, the position of the primary index
   (the rotation corresponding to the full string), or -1 if not found. */
template<typename string_type, typename sarray_type,
         typename bucket_type, typename index_type>
int
computeBWT(string_type T, sarray_type SA, bucket_type C, bucket_type B,
           index_type n, index_type k) {
  typedef typename std::iterator_traits<string_type>::value_type char_type;
  sarray_type b;
  index_type i, j, pidx = -1;
  char_type c0, c1;
  /* compute SAl */
  if(C == B) { getCounts(T, C, n, k); }
  getBuckets(C, B, k, false); /* find starts of buckets */
  b = SA + B[c1 = T[j = n - 1]];
  *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j;
  for(i = 0; i < n; ++i) {
    if(0 < (j = SA[i])) {
      /* Overwrite the slot with the BWT character (complemented). */
      SA[i] = ~(c0 = T[--j]);
      if(c0 != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; }
      *b++ = ((0 < j) && (T[j - 1] < c1)) ? ~j : j;
    } else if(j != 0) {
      SA[i] = ~j;
    }
  }
  /* compute SAs */
  if(C == B) { getCounts(T, C, n, k); }
  getBuckets(C, B, k, true); /* find ends of buckets */
  for(i = n - 1, b = SA + B[c1 = 0]; 0 <= i; --i) {
    if(0 < (j = SA[i])) {
      SA[i] = (c0 = T[--j]);
      if(c0 != c1) { B[c1] = b - SA; b = SA + B[c1 = c0]; }
      /* For an S-type predecessor, emit its character directly. */
      *--b = ((0 < j) && (T[j - 1] > c1)) ? ~((index_type)T[j - 1]) : j;
    } else if(j != 0) {
      SA[i] = ~j;
    } else {
      /* j == 0 marks the primary index of the BWT. */
      pidx = i;
    }
  }
  return pidx;
}
|
157 |
+
|
158 |
+
/* find the suffix array SA of T[0..n-1] in {0..k}^n
|
159 |
+
use a working space (excluding s and SA) of at most 2n+O(1) for a constant alphabet */
|
160 |
+
/* Core SA-IS driver: computes the suffix array (or, when isbwt is true,
   the BWT via computeBWT) of T[0..n-1] over alphabet {0..k-1}.
   fs is the amount of free workspace available past SA[n]; when it is
   too small the bucket arrays are heap-allocated instead of carved out
   of SA's tail. Returns 0 (or the BWT primary index) on success, -2 on
   allocation failure in a recursive call. */
template<typename string_type, typename sarray_type, typename index_type>
int
suffixsort(string_type T, sarray_type SA,
           index_type fs, index_type n, index_type k,
           bool isbwt) {
  typedef typename std::iterator_traits<string_type>::value_type char_type;
  sarray_type RA;
  index_type i, j, m, p, q, plen, qlen, name, pidx = 0;
  bool diff;
  int c;
#ifdef _OPENMP
  int maxthreads = omp_get_max_threads();
#else
# define maxthreads 1
#endif
  char_type c0, c1;

  /* stage 1: reduce the problem by at least 1/2
     sort all the S-substrings */
  if(fs < (maxthreads * k)) {
    /* Not enough free space after SA: allocate bucket arrays. */
    index_type *C, *B;
    /* NOTE(review): plain new throws on failure rather than returning 0,
       so this check relies on the caller's catch(...) — see saisxx. */
    if((C = new index_type[maxthreads * k]) == 0) { return -2; }
    B = (1 < maxthreads) ? C + k : C;
    getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
    for(i = 0; i < n; ++i) { SA[i] = 0; }
    /* Scan right-to-left, dropping each LMS position into its bucket.
       c tracks whether the suffix to the right was S-type. */
    for(i = n - 2, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) {
      if((c0 = T[i]) < (c1 + c)) { c = 1; }
      else if(c != 0) { SA[--B[c1]] = i + 1, c = 0; }
    }
    induceSA(T, SA, C, B, n, k);
    delete [] C;
  } else {
    /* Reuse SA's tail as bucket storage. */
    sarray_type C, B;
    C = SA + n;
    B = ((1 < maxthreads) || (k <= (fs - k))) ? C + k : C;
    getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
    for(i = 0; i < n; ++i) { SA[i] = 0; }
    for(i = n - 2, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) {
      if((c0 = T[i]) < (c1 + c)) { c = 1; }
      else if(c != 0) { SA[--B[c1]] = i + 1, c = 0; }
    }
    induceSA(T, SA, C, B, n, k);
  }

  /* compact all the sorted substrings into the first m items of SA
     2*m must be not larger than n (proveable) */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i, j, p, c0, c1)
  for(i = 0; i < n; ++i) {
    p = SA[i];
    /* Mark LMS positions (L-type to the left, S-type run then larger char). */
    if((0 < p) && (T[p - 1] > (c0 = T[p]))) {
      for(j = p + 1; (j < n) && (c0 == (c1 = T[j])); ++j) { }
      if((j < n) && (c0 < c1)) { SA[i] = ~p; }
    }
  }
  for(i = 0, m = 0; i < n; ++i) { if((p = SA[i]) < 0) { SA[m++] = ~p; } }
#else
  for(i = 0, m = 0; i < n; ++i) {
    p = SA[i];
    if((0 < p) && (T[p - 1] > (c0 = T[p]))) {
      for(j = p + 1; (j < n) && (c0 == (c1 = T[j])); ++j) { }
      if((j < n) && (c0 < c1)) { SA[m++] = p; }
    }
  }
#endif
  j = m + (n >> 1);
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
  for(i = m; i < j; ++i) { SA[i] = 0; } /* init the name array buffer */
  /* store the length of all substrings */
  for(i = n - 2, j = n, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) {
    if((c0 = T[i]) < (c1 + c)) { c = 1; }
    else if(c != 0) { SA[m + ((i + 1) >> 1)] = j - i - 1; j = i + 1; c = 0; }
  }
  /* find the lexicographic names of all substrings */
  for(i = 0, name = 0, q = n, qlen = 0; i < m; ++i) {
    p = SA[i], plen = SA[m + (p >> 1)], diff = true;
    /* Equal-length substrings are compared character by character
       against the previous distinct substring (q, qlen). */
    if(plen == qlen) {
      for(j = 0; (j < plen) && (T[p + j] == T[q + j]); ++j) { }
      if(j == plen) { diff = false; }
    }
    if(diff != false) { ++name, q = p, qlen = plen; }
    SA[m + (p >> 1)] = name;
  }

  /* stage 2: solve the reduced problem
     recurse if names are not yet unique */
  if(name < m) {
    /* Build the reduced string RA in the tail of the workspace. */
    RA = SA + n + fs - m;
    for(i = m + (n >> 1) - 1, j = m - 1; m <= i; --i) {
      if(SA[i] != 0) { RA[j--] = SA[i] - 1; }
    }
    if(suffixsort(RA, SA, fs + n - m * 2, m, name, false) != 0) { return -2; }
    /* Recompute LMS positions (same right-to-left scan as stage 1). */
    for(i = n - 2, j = m - 1, c = 0, c1 = T[n - 1]; 0 <= i; --i, c1 = c0) {
      if((c0 = T[i]) < (c1 + c)) { c = 1; }
      else if(c != 0) { RA[j--] = i + 1, c = 0; } /* get p1 */
    }
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
    for(i = 0; i < m; ++i) { SA[i] = RA[SA[i]]; } /* get index in s */
  }

  /* stage 3: induce the result for the original problem */
  if(fs < (maxthreads * k)) {
    index_type *B, *C;
    if((C = new index_type[maxthreads * k]) == 0) { return -2; }
    B = (1 < maxthreads) ? C + k : C;
    /* put all left-most S characters into their buckets */
    getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
    for(i = m; i < n; ++i) { SA[i] = 0; } /* init SA[m..n-1] */
    /* Scatter the sorted LMS suffixes from SA[0..m-1] into bucket ends,
       right-to-left so equal buckets keep their relative order. */
    for(i = m - 1; 0 <= i; --i) {
      j = SA[i], SA[i] = 0;
      SA[--B[T[j]]] = j;
    }
    if(isbwt == false) { induceSA(T, SA, C, B, n, k); }
    else { pidx = computeBWT(T, SA, C, B, n, k); }
    delete [] C;
  } else {
    sarray_type C, B;
    C = SA + n;
    B = ((1 < maxthreads) || (k <= (fs - k))) ? C + k : C;
    /* put all left-most S characters into their buckets */
    getCounts(T, C, n, k); getBuckets(C, B, k, true); /* find ends of buckets */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(i)
#endif
    for(i = m; i < n; ++i) { SA[i] = 0; } /* init SA[m..n-1] */
    for(i = m - 1; 0 <= i; --i) {
      j = SA[i], SA[i] = 0;
      SA[--B[T[j]]] = j;
    }
    if(isbwt == false) { induceSA(T, SA, C, B, n, k); }
    else { pidx = computeBWT(T, SA, C, B, n, k); }
  }

  return pidx;
#ifndef _OPENMP
# undef maxthreads
#endif
}
|
311 |
+
|
312 |
+
} /* namespace saisxx_private */
|
313 |
+
|
314 |
+
|
315 |
+
/**
|
316 |
+
* @brief Constructs the suffix array of a given string in linear time.
|
317 |
+
* @param T[0..n-1] The input string. (random access iterator)
|
318 |
+
* @param SA[0..n-1] The output array of suffixes. (random access iterator)
|
319 |
+
* @param n The length of the given string.
|
320 |
+
* @param k The alphabet size.
|
321 |
+
* @return 0 if no error occurred, -1 or -2 otherwise.
|
322 |
+
*/
|
323 |
+
template<typename string_type, typename sarray_type, typename index_type>
|
324 |
+
int
|
325 |
+
saisxx(string_type T, sarray_type SA, index_type n, index_type k = 256) {
|
326 |
+
int err;
|
327 |
+
if((n < 0) || (k <= 0)) { return -1; }
|
328 |
+
if(n <= 1) { if(n == 1) { SA[0] = 0; } return 0; }
|
329 |
+
try { err = saisxx_private::suffixsort(T, SA, 0, n, k, false); }
|
330 |
+
catch(...) { err = -2; }
|
331 |
+
return err;
|
332 |
+
}
|
333 |
+
|
334 |
+
/**
|
335 |
+
* @brief Constructs the burrows-wheeler transformed string of a given string in linear time.
|
336 |
+
* @param T[0..n-1] The input string. (random access iterator)
|
337 |
+
* @param U[0..n-1] The output string. (random access iterator)
|
338 |
+
* @param A[0..n-1] The temporary array. (random access iterator)
|
339 |
+
* @param n The length of the given string.
|
340 |
+
* @param k The alphabet size.
|
341 |
+
* @return The primary index if no error occurred, -1 or -2 otherwise.
|
342 |
+
*/
|
343 |
+
/**
 * @brief Constructs the burrows-wheeler transformed string of a given string in linear time.
 * @param T[0..n-1] The input string. (random access iterator)
 * @param U[0..n-1] The output string. (random access iterator)
 * @param A[0..n-1] The temporary array. (random access iterator)
 * @param n The length of the given string.
 * @param k The alphabet size.
 * @return The primary index if no error occurred, -1 or -2 otherwise.
 */
template<typename string_type, typename sarray_type, typename index_type>
index_type
saisxx_bwt(string_type T, string_type U, sarray_type A, index_type n, index_type k = 256) {
  typedef typename std::iterator_traits<string_type>::value_type char_type;
  index_type i, pidx;
  if((n < 0) || (k <= 0)) { return -1; }
  if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
  try {
    /* suffixsort with isbwt=true leaves BWT characters in A and returns
       the 0-based primary index (position of the full-string rotation). */
    pidx = saisxx_private::suffixsort(T, A, 0, n, k, true);
    if(0 <= pidx) {
      /* The last character of T leads the output; A's entries fill the
         rest, skipping the slot at the primary index itself. */
      U[0] = T[n - 1];
      for(i = 0; i < pidx; ++i) { U[i + 1] = (char_type)A[i]; }
      for(i += 1; i < n; ++i) { U[i] = (char_type)A[i]; }
      /* Convert to the conventional 1-based primary index. */
      pidx += 1;
    }
  } catch(...) { pidx = -2; }
  return pidx;
}
|
361 |
+
|
362 |
+
|
363 |
+
#endif /* __cplusplus */
|
364 |
+
#endif /* _SAIS_HXX */
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/LICENSE
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright 2008 Google Inc. All rights reserved.
|
2 |
+
|
3 |
+
Redistribution and use in source and binary forms, with or without
|
4 |
+
modification, are permitted provided that the following conditions are
|
5 |
+
met:
|
6 |
+
|
7 |
+
* Redistributions of source code must retain the above copyright
|
8 |
+
notice, this list of conditions and the following disclaimer.
|
9 |
+
* Redistributions in binary form must reproduce the above
|
10 |
+
copyright notice, this list of conditions and the following disclaimer
|
11 |
+
in the documentation and/or other materials provided with the
|
12 |
+
distribution.
|
13 |
+
* Neither the name of Google Inc. nor the names of its
|
14 |
+
contributors may be used to endorse or promote products derived from
|
15 |
+
this software without specific prior written permission.
|
16 |
+
|
17 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
18 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
19 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
20 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
21 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
22 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
23 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
24 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
25 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
26 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
27 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
28 |
+
|
29 |
+
Code generated by the Protocol Buffer compiler is owned by the owner
|
30 |
+
of the input file used when generating it. This code is not
|
31 |
+
standalone and requires a support library to be linked with it. This
|
32 |
+
support library is itself covered by the above license.
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arena.cc
ADDED
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#include <google/protobuf/arena.h>
|
32 |
+
|
33 |
+
#include <algorithm>
|
34 |
+
#include <limits>
|
35 |
+
|
36 |
+
|
37 |
+
#ifdef ADDRESS_SANITIZER
|
38 |
+
#include <sanitizer/asan_interface.h>
|
39 |
+
#endif // ADDRESS_SANITIZER
|
40 |
+
|
41 |
+
#include <google/protobuf/stubs/port.h>
|
42 |
+
|
43 |
+
namespace google {
|
44 |
+
static const size_t kMinCleanupListElements = 8;
|
45 |
+
static const size_t kMaxCleanupListElements = 64; // 1kB on 64-bit.
|
46 |
+
|
47 |
+
namespace protobuf {
|
48 |
+
namespace internal {
|
49 |
+
|
50 |
+
|
51 |
+
// Monotonic source of per-arena lifecycle ids; see ArenaImpl::Init().
std::atomic<int64> ArenaImpl::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
// Platform without thread_local support: emulate it with an explicit
// thread-local-storage helper. The pointer is intentionally leaked so the
// cache survives until process exit.
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
// DLL builds: keep the thread-local inside a function so it is not
// exported as data from the DLL.
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
// Default: a plain thread-local variable ({-1, NULL} = "no arena seen").
GOOGLE_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {-1, NULL};
#endif
|
66 |
+
|
67 |
+
// (Re)initializes the arena: assigns a fresh lifecycle id, clears the
// hint/thread lists, and, if the user supplied an initial block, builds
// the first SerialArena inside it and caches it for the calling thread.
// Relaxed stores are sufficient here; no other thread can see the arena
// before construction (or Reset) completes.
void ArenaImpl::Init() {
  lifecycle_id_ =
      lifecycle_id_generator_.fetch_add(1, std::memory_order_relaxed);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);

  if (initial_block_) {
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without having to
    // perform atomic operations.
    new (initial_block_) Block(options_.initial_block_size, NULL);
    SerialArena* serial =
        SerialArena::New(initial_block_, &thread_cache(), this);
    serial->set_next(NULL);
    threads_.store(serial, std::memory_order_relaxed);
    space_allocated_.store(options_.initial_block_size,
                           std::memory_order_relaxed);
    CacheSerialArena(serial);
  } else {
    space_allocated_.store(0, std::memory_order_relaxed);
  }
}
|
89 |
+
|
90 |
+
// Runs all registered cleanups, then frees every block. Cleanup must be a
// separate first pass because destructors may touch memory in other blocks.
ArenaImpl::~ArenaImpl() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  FreeBlocks();
}
|
96 |
+
|
97 |
+
// Destroys all arena contents and returns the arena to its freshly
// initialized state. Returns the number of bytes that had been allocated.
uint64 ArenaImpl::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  uint64 space_allocated = FreeBlocks();
  Init();

  return space_allocated;
}
|
106 |
+
|
107 |
+
// Allocates a new block chained after last_block, large enough to hold
// min_bytes of payload plus the block header. Block sizes grow
// geometrically (doubling, capped at max_block_size) to amortize
// allocation cost.
ArenaImpl::Block* ArenaImpl::NewBlock(Block* last_block, size_t min_bytes) {
  size_t size;
  if (last_block) {
    // Double the current block size, up to a limit.
    size = std::min(2 * last_block->size(), options_.max_block_size);
  } else {
    size = options_.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes, std::numeric_limits<size_t>::max() - kBlockHeaderSize);
  size = std::max(size, kBlockHeaderSize + min_bytes);

  void* mem = options_.block_alloc(size);
  // Placement-new the Block header at the front of the raw allocation.
  Block* b = new (mem) Block(size, last_block);
  space_allocated_.fetch_add(size, std::memory_order_relaxed);
  return b;
}
|
124 |
+
|
125 |
+
// Block header constructor: the allocation cursor (pos_) starts just past
// the header itself.
ArenaImpl::Block::Block(size_t size, Block* next)
    : next_(next), pos_(kBlockHeaderSize), size_(size) {}
|
127 |
+
|
128 |
+
// Slow path of AddCleanup: the current cleanup chunk is full (or absent),
// so allocate a new chunk from the arena itself, link it at the head of
// the chunk list, then retry the (now guaranteed-fast) AddCleanup.
// Chunk capacity doubles up to kMaxCleanupListElements.
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
void ArenaImpl::SerialArena::AddCleanupFallback(void* elem,
                                                void (*cleanup)(void*)) {
  size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
  size = std::min(size, kMaxCleanupListElements);
  size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
  CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
  list->next = cleanup_;
  list->size = size;

  cleanup_ = list;
  cleanup_ptr_ = &list->nodes[0];
  cleanup_limit_ = &list->nodes[size];

  AddCleanup(elem, cleanup);
}
|
144 |
+
|
145 |
+
// Allocates n bytes (8-byte aligned) from this arena. Fast path: the
// calling thread already has a cached SerialArena; otherwise fall back to
// the slow lookup/creation path.
GOOGLE_PROTOBUF_ATTRIBUTE_FUNC_ALIGN(32)
void* ArenaImpl::AllocateAligned(size_t n) {
  SerialArena* arena;
  if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAligned(n);
  } else {
    return AllocateAlignedFallback(n);
  }
}
|
154 |
+
|
155 |
+
// Allocates n bytes and registers a cleanup callback (typically a
// destructor) to run at arena teardown. Same fast/slow split as
// AllocateAligned.
void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n,
                                              void (*cleanup)(void*)) {
  SerialArena* arena;
  if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedAndAddCleanup(n, cleanup);
  } else {
    return AllocateAlignedAndAddCleanupFallback(n, cleanup);
  }
}
|
164 |
+
|
165 |
+
// Registers cleanup(elem) to be invoked when the arena is reset or
// destroyed, without allocating user memory.
void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    arena->AddCleanup(elem, cleanup);
  } else {
    return AddCleanupFallback(elem, cleanup);
  }
}
|
173 |
+
|
174 |
+
// Slow path of AllocateAligned: resolve (or create) this thread's
// SerialArena first, then allocate from it.
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) {
  return GetSerialArena()->AllocateAligned(n);
}
|
178 |
+
|
179 |
+
// Slow path of AllocateAlignedAndAddCleanup (no cached SerialArena).
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n,
                                                      void (*cleanup)(void*)) {
  return GetSerialArena()->AllocateAlignedAndAddCleanup(n, cleanup);
}
|
184 |
+
|
185 |
+
// Slow path of AddCleanup (no cached SerialArena for this thread).
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArena()->AddCleanup(elem, cleanup);
}
|
189 |
+
|
190 |
+
// Attempts to find the calling thread's SerialArena without locking or
// list traversal. Returns true and sets *arena on a hit; false means the
// caller must take the fallback path.
inline GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
bool ArenaImpl::GetSerialArenaFast(ArenaImpl::SerialArena** arena) {
  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  ThreadCache* tc = &thread_cache();
  if (GOOGLE_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) {
    *arena = tc->last_serial_arena;
    return true;
  }

  // Check whether we own the last accessed SerialArena on this arena. This
  // fast path optimizes the case where a single thread uses multiple arenas.
  SerialArena* serial = hint_.load(std::memory_order_acquire);
  if (GOOGLE_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) {
    *arena = serial;
    return true;
  }

  return false;
}
|
211 |
+
|
212 |
+
// Returns the calling thread's SerialArena, creating one via the fallback
// path if the fast lookup misses.
ArenaImpl::SerialArena* ArenaImpl::GetSerialArena() {
  SerialArena* arena;
  if (GOOGLE_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena;
  } else {
    return GetSerialArenaFallback(&thread_cache());
  }
}
|
220 |
+
|
221 |
+
// Slow path of SerialArena::AllocateAligned: the current block is full.
// Record the true fill level of the old head, chain a fresh block large
// enough for n bytes, and retry.
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
void* ArenaImpl::SerialArena::AllocateAlignedFallback(size_t n) {
  // Sync back to current's pos.
  head_->set_pos(head_->size() - (limit_ - ptr_));

  head_ = arena_->NewBlock(head_, n);
  ptr_ = head_->Pointer(head_->pos());
  limit_ = head_->Pointer(head_->size());

#ifdef ADDRESS_SANITIZER
  // Keep the unused tail poisoned so stray reads/writes are caught; the
  // allocator unpoisons each chunk as it is handed out.
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER

  return AllocateAligned(n);
}
|
236 |
+
|
237 |
+
// Total bytes reserved from the system (including unused block tails).
uint64 ArenaImpl::SpaceAllocated() const {
  return space_allocated_.load(std::memory_order_relaxed);
}
|
240 |
+
|
241 |
+
// Total bytes actually handed out, summed over every thread's
// SerialArena. Acquire load pairs with the release CAS that publishes
// new SerialArenas in GetSerialArenaFallback.
uint64 ArenaImpl::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for ( ; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used;
}
|
249 |
+
|
250 |
+
// Bytes handed out by this SerialArena: the live head block is measured
// from ptr_ (its pos() may be stale), older blocks from their recorded
// pos(), minus the SerialArena header carved out of its first block.
uint64 ArenaImpl::SerialArena::SpaceUsed() const {
  // Get current block's size from ptr_ (since we can't trust head_->pos().
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  // Get subsequent block size from b->pos().
  for (Block* b = head_->next(); b; b = b->next()) {
    space_used += (b->pos() - kBlockHeaderSize);
  }
  // Remove the overhead of the SerialArena itself.
  space_used -= kSerialArenaSize;
  return space_used;
}
|
261 |
+
|
262 |
+
// Frees every block of every SerialArena (except the user-provided
// initial block) and returns the total bytes that had been allocated.
// Caller must guarantee exclusive access (destructor / Reset).
uint64 ArenaImpl::FreeBlocks() {
  uint64 space_allocated = 0;
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  while (serial) {
    // This is inside a block we are freeing, so we need to read it now.
    SerialArena* next = serial->next();
    space_allocated += ArenaImpl::SerialArena::Free(serial, initial_block_,
                                                    options_.block_dealloc);
    // serial is dead now.
    serial = next;
  }

  return space_allocated;
}
|
279 |
+
|
280 |
+
// Frees all blocks owned by |serial| via block_dealloc, skipping the
// caller-owned initial_block. Returns the bytes released. |serial| lives
// inside one of the blocks being freed, so it must not be touched after
// its block is deallocated.
uint64 ArenaImpl::SerialArena::Free(ArenaImpl::SerialArena* serial,
                                    Block* initial_block,
                                    void (*block_dealloc)(void*, size_t)) {
  uint64 space_allocated = 0;

  // We have to be careful in this function, since we will be freeing the Block
  // that contains this SerialArena. Be careful about accessing |serial|.

  for (Block* b = serial->head_; b; ) {
    // This is inside the block we are freeing, so we need to read it now.
    Block* next_block = b->next();
    space_allocated += (b->size());

#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned, so
    // return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER

    if (b != initial_block) {
      block_dealloc(b, b->size());
    }

    b = next_block;
  }

  return space_allocated;
}
|
308 |
+
|
309 |
+
// Runs the registered cleanup callbacks of every SerialArena. Caller must
// guarantee exclusive access (destructor / Reset).
void ArenaImpl::CleanupList() {
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  for ( ; serial; serial = serial->next()) {
    serial->CleanupList();
  }
}
|
318 |
+
|
319 |
+
// Cheap inline guard: only take the out-of-line cleanup path when at
// least one cleanup was ever registered.
void ArenaImpl::SerialArena::CleanupList() {
  if (cleanup_ != NULL) {
    CleanupListFallback();
  }
}
|
324 |
+
|
325 |
+
// Invokes all registered cleanup callbacks in reverse registration order:
// the newest (partially filled) chunk first, walking its nodes backwards,
// then each older chunk, which is known to be completely full.
void ArenaImpl::SerialArena::CleanupListFallback() {
  // Cleanup newest chunk: ptrs give us length.
  size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
  CleanupNode* node = cleanup_ptr_;
  for (size_t i = 0; i < n; i++) {
    --node;
    node->cleanup(node->elem);
  }

  // Cleanup older chunks, which are known to be full.
  CleanupChunk* list = cleanup_->next;
  while (list) {
    size_t n = list->size;
    CleanupNode* node = &list->nodes[list->size];
    for (size_t i = 0; i < n; i++) {
      --node;
      node->cleanup(node->elem);
    }
    list = list->next;
  }
}
|
346 |
+
|
347 |
+
// Constructs a SerialArena in-place at the start of a fresh block |b|,
// owned by |owner| (a ThreadCache pointer used as an opaque identity).
// The SerialArena header consumes the first kSerialArenaSize bytes of the
// block's payload.
ArenaImpl::SerialArena* ArenaImpl::SerialArena::New(Block* b, void* owner,
                                                    ArenaImpl* arena) {
  GOOGLE_DCHECK_EQ(b->pos(), kBlockHeaderSize);  // Should be a fresh block
  GOOGLE_DCHECK_LE(kBlockHeaderSize + kSerialArenaSize, b->size());
  SerialArena* serial =
      reinterpret_cast<SerialArena*>(b->Pointer(kBlockHeaderSize));
  b->set_pos(kBlockHeaderSize + kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = NULL;
  serial->cleanup_limit_ = NULL;
  return serial;
}
|
364 |
+
|
365 |
+
// Slow path: find this thread's SerialArena by scanning the lock-free
// list; if absent, allocate a first block, build a SerialArena in it and
// publish it with a release CAS at the list head. Always refreshes the
// thread cache before returning.
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
ArenaImpl::SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for ( ; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    Block* b = NewBlock(NULL, kSerialArenaSize);
    serial = SerialArena::New(b, me, this);

    // Lock-free push onto the head of the list; on CAS failure |head| is
    // reloaded and the link is retried.
    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}
|
391 |
+
|
392 |
+
} // namespace internal
|
393 |
+
|
394 |
+
// Invokes the user-registered reset and destruction hooks (if any) with
// the arena's total allocated byte count.
void Arena::CallDestructorHooks() {
  uint64 space_allocated = impl_.SpaceAllocated();
  // Call the reset hook
  if (on_arena_reset_ != NULL) {
    on_arena_reset_(this, hooks_cookie_, space_allocated);
  }

  // Call the destruction hook
  if (on_arena_destruction_ != NULL) {
    on_arena_destruction_(this, hooks_cookie_, space_allocated);
  }
}
|
406 |
+
|
407 |
+
void Arena::OnArenaAllocation(const std::type_info* allocated_type,
|
408 |
+
size_t n) const {
|
409 |
+
if (on_arena_allocation_ != NULL) {
|
410 |
+
on_arena_allocation_(allocated_type, n, hooks_cookie_);
|
411 |
+
}
|
412 |
+
}
|
413 |
+
|
414 |
+
} // namespace protobuf
|
415 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/arenastring.cc
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// The ArenaString implementation is not included in the open-source release. Do
|
32 |
+
// not include this file in the distribution.
|
33 |
+
|
34 |
+
#include <google/protobuf/arenastring.h>
|
35 |
+
|
36 |
+
namespace google {
|
37 |
+
namespace protobuf {
|
38 |
+
namespace internal {
|
39 |
+
|
40 |
+
|
41 |
+
} // namespace internal
|
42 |
+
} // namespace protobuf
|
43 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/bytestream.cc
ADDED
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#include <google/protobuf/stubs/bytestream.h>
|
32 |
+
|
33 |
+
#include <string.h>
|
34 |
+
#include <algorithm>
|
35 |
+
|
36 |
+
namespace google {
|
37 |
+
namespace protobuf {
|
38 |
+
namespace strings {
|
39 |
+
|
40 |
+
// Streams fragments from this source into |sink| until n bytes have been
// copied, or logs and stops early if the source runs dry.
void ByteSource::CopyTo(ByteSink* sink, size_t n) {
  size_t remaining = n;
  while (remaining > 0) {
    StringPiece chunk = Peek();
    if (chunk.empty()) {
      GOOGLE_LOG(DFATAL) << "ByteSource::CopyTo() overran input.";
      break;
    }
    const std::size_t take = std::min<std::size_t>(remaining, chunk.size());
    sink->Append(chunk.data(), take);
    Skip(take);
    remaining -= take;
  }
}
|
53 |
+
|
54 |
+
// Default Flush() is a no-op; buffering subclasses may override it.
void ByteSink::Flush() {}
|
55 |
+
|
56 |
+
// Copies |n| bytes into the destination array with no bounds checking; the
// caller guarantees the destination is large enough.  The copy is skipped
// when the caller already wrote in place via GetAppendBuffer().
void UncheckedArrayByteSink::Append(const char* data, size_t n) {
  if (data != dest_) {
    // Catch cases where the pointer returned by GetAppendBuffer() was modified.
    GOOGLE_DCHECK(!(dest_ <= data && data < (dest_ + n)))
        << "Append() data[] overlaps with dest_[]";
    memcpy(dest_, data, n);
  }
  dest_ += n;
}
|
65 |
+
|
66 |
+
// Sinks into a fixed, caller-owned buffer of |capacity| bytes; writes past
// the end are dropped and recorded in overflowed_.
CheckedArrayByteSink::CheckedArrayByteSink(char* outbuf, size_t capacity)
    : outbuf_(outbuf), capacity_(capacity), size_(0), overflowed_(false) {
}
|
69 |
+
|
70 |
+
void CheckedArrayByteSink::Append(const char* bytes, size_t n) {
|
71 |
+
size_t available = capacity_ - size_;
|
72 |
+
if (n > available) {
|
73 |
+
n = available;
|
74 |
+
overflowed_ = true;
|
75 |
+
}
|
76 |
+
if (n > 0 && bytes != (outbuf_ + size_)) {
|
77 |
+
// Catch cases where the pointer returned by GetAppendBuffer() was modified.
|
78 |
+
GOOGLE_DCHECK(!(outbuf_ <= bytes && bytes < (outbuf_ + capacity_)))
|
79 |
+
<< "Append() bytes[] overlaps with outbuf_[]";
|
80 |
+
memcpy(outbuf_ + size_, bytes, n);
|
81 |
+
}
|
82 |
+
size_ += n;
|
83 |
+
}
|
84 |
+
|
85 |
+
// Starts with a heap buffer of |estimated_size| bytes; Append() grows it on
// demand and GetBuffer() hands ownership to the caller.
GrowingArrayByteSink::GrowingArrayByteSink(size_t estimated_size)
    : capacity_(estimated_size),
      buf_(new char[estimated_size]),
      size_(0) {
}
|
90 |
+
|
91 |
+
// Releases the buffer if it was never claimed by the caller.
GrowingArrayByteSink::~GrowingArrayByteSink() {
  delete[] buf_;  // Just in case the user didn't call GetBuffer.
}
|
94 |
+
|
95 |
+
// Appends |n| bytes, growing the buffer as needed.  The overlap DCHECK must
// run BEFORE Expand(): reallocation would invalidate a |bytes| pointer that
// aliased the old buffer, hiding the bug.
void GrowingArrayByteSink::Append(const char* bytes, size_t n) {
  size_t available = capacity_ - size_;
  if (bytes != (buf_ + size_)) {
    // Catch cases where the pointer returned by GetAppendBuffer() was modified.
    // We need to test for this before calling Expand() which may reallocate.
    GOOGLE_DCHECK(!(buf_ <= bytes && bytes < (buf_ + capacity_)))
        << "Append() bytes[] overlaps with buf_[]";
  }
  if (n > available) {
    Expand(n - available);
  }
  if (n > 0 && bytes != (buf_ + size_)) {
    memcpy(buf_ + size_, bytes, n);
  }
  size_ += n;
}
|
111 |
+
|
112 |
+
// Transfers ownership of the accumulated bytes to the caller (trimmed to
// size) and resets this sink to empty.
char* GrowingArrayByteSink::GetBuffer(size_t* nbytes) {
  ShrinkToFit();
  *nbytes = size_;
  char* released = buf_;
  buf_ = NULL;
  size_ = 0;
  capacity_ = 0;
  return released;
}
|
120 |
+
|
121 |
+
// Grows the buffer geometrically: at least 50% bigger, and always big enough
// to hold |amount| additional bytes.
void GrowingArrayByteSink::Expand(size_t amount) {
  const size_t grown = std::max(capacity_ + amount, (3 * capacity_) / 2);
  char* replacement = new char[grown];
  memcpy(replacement, buf_, size_);
  delete[] buf_;
  buf_ = replacement;
  capacity_ = grown;
}
|
129 |
+
|
130 |
+
void GrowingArrayByteSink::ShrinkToFit() {
|
131 |
+
// Shrink only if the buffer is large and size_ is less than 3/4
|
132 |
+
// of capacity_.
|
133 |
+
if (capacity_ > 256 && size_ < (3 * capacity_) / 4) {
|
134 |
+
char* just_enough = new char[size_];
|
135 |
+
memcpy(just_enough, buf_, size_);
|
136 |
+
delete[] buf_;
|
137 |
+
buf_ = just_enough;
|
138 |
+
capacity_ = size_;
|
139 |
+
}
|
140 |
+
}
|
141 |
+
|
142 |
+
// Appends directly onto the destination string.
void StringByteSink::Append(const char* data, size_t n) {
  dest_->append(data, n);
}
|
145 |
+
|
146 |
+
// Bytes remaining: the whole backing array view.
size_t ArrayByteSource::Available() const {
  return input_.size();
}
|
149 |
+
|
150 |
+
// The entire remaining input is always visible as a single fragment.
StringPiece ArrayByteSource::Peek() {
  return input_;
}
|
153 |
+
|
154 |
+
// Consumes |n| bytes from the front of the array view.
void ArrayByteSource::Skip(size_t n) {
  GOOGLE_DCHECK_LE(n, input_.size());
  input_.remove_prefix(n);
}
|
158 |
+
|
159 |
+
// Wraps |source|, exposing at most |limit| of its bytes.
LimitByteSource::LimitByteSource(ByteSource *source, size_t limit)
  : source_(source),
    limit_(limit) {
}
|
163 |
+
|
164 |
+
size_t LimitByteSource::Available() const {
|
165 |
+
size_t available = source_->Available();
|
166 |
+
if (available > limit_) {
|
167 |
+
available = limit_;
|
168 |
+
}
|
169 |
+
|
170 |
+
return available;
|
171 |
+
}
|
172 |
+
|
173 |
+
// Peeks at the wrapped source, trimming the fragment to the remaining quota.
StringPiece LimitByteSource::Peek() {
  StringPiece fragment(source_->Peek());
  if (fragment.size() > limit_) {
    fragment.set(fragment.data(), limit_);
  }
  return fragment;
}
|
181 |
+
|
182 |
+
// Skips |n| bytes in the wrapped source and charges them against the quota.
void LimitByteSource::Skip(size_t n) {
  GOOGLE_DCHECK_LE(n, limit_);
  source_->Skip(n);
  limit_ -= n;
}
|
187 |
+
|
188 |
+
// Copies |n| bytes straight through to |sink| and charges them against the
// quota.
void LimitByteSource::CopyTo(ByteSink *sink, size_t n) {
  GOOGLE_DCHECK_LE(n, limit_);
  source_->CopyTo(sink, n);
  limit_ -= n;
}
|
193 |
+
|
194 |
+
} // namespace strings
|
195 |
+
} // namespace protobuf
|
196 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/coded_stream.cc
ADDED
@@ -0,0 +1,780 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This implementation is heavily optimized to make reads and writes
|
36 |
+
// of small values (especially varints) as fast as possible. In
|
37 |
+
// particular, we optimize for the common case that a read or a write
|
38 |
+
// will not cross the end of the buffer, since we can avoid a lot
|
39 |
+
// of branching in this case.
|
40 |
+
|
41 |
+
#include <google/protobuf/io/coded_stream_inl.h>
|
42 |
+
#include <algorithm>
|
43 |
+
#include <utility>
|
44 |
+
#include <limits.h>
|
45 |
+
#include <google/protobuf/io/zero_copy_stream.h>
|
46 |
+
#include <google/protobuf/arena.h>
|
47 |
+
#include <google/protobuf/stubs/logging.h>
|
48 |
+
#include <google/protobuf/stubs/common.h>
|
49 |
+
#include <google/protobuf/stubs/stl_util.h>
|
50 |
+
|
51 |
+
|
52 |
+
namespace google {
|
53 |
+
namespace protobuf {
|
54 |
+
namespace io {
|
55 |
+
|
56 |
+
namespace {
|
57 |
+
|
58 |
+
static const int kMaxVarintBytes = 10;
|
59 |
+
static const int kMaxVarint32Bytes = 5;
|
60 |
+
|
61 |
+
|
62 |
+
// Advances |input| until it yields a non-empty chunk, or fails.  Always calls
// Next() at least once.
inline bool NextNonEmpty(ZeroCopyInputStream* input,
                         const void** data, int* size) {
  bool ok = input->Next(data, size);
  while (ok && *size == 0) {
    ok = input->Next(data, size);
  }
  return ok;
}
|
70 |
+
|
71 |
+
} // namespace
|
72 |
+
|
73 |
+
// CodedInputStream ==================================================
|
74 |
+
|
75 |
+
// Returns any unread buffered bytes to the underlying stream so it can be
// handed to another reader at the exact current parse position.
CodedInputStream::~CodedInputStream() {
  if (input_ == NULL) return;
  BackUpInputToCurrentPosition();
}
|
80 |
+
|
81 |
+
// Static.
|
82 |
+
int CodedInputStream::default_recursion_limit_ = 100;
|
83 |
+
|
84 |
+
|
85 |
+
// Aliasing is only honored when the underlying stream supports it.
void CodedOutputStream::EnableAliasing(bool enabled) {
  aliasing_enabled_ = enabled && output_->AllowsAliasing();
}
|
88 |
+
|
89 |
+
// Pushes back to the stream everything we buffered but did not consume:
// the unread buffer tail, bytes hidden behind the active limit, and bytes
// read past a limit (overflow).
void CodedInputStream::BackUpInputToCurrentPosition() {
  int backup_bytes = BufferSize() + buffer_size_after_limit_ + overflow_bytes_;
  if (backup_bytes > 0) {
    input_->BackUp(backup_bytes);

    // total_bytes_read_ doesn't include overflow_bytes_.
    total_bytes_read_ -= BufferSize() + buffer_size_after_limit_;
    buffer_end_ = buffer_;
    buffer_size_after_limit_ = 0;
    overflow_bytes_ = 0;
  }
}
|
101 |
+
|
102 |
+
// Re-derives buffer_end_ / buffer_size_after_limit_ after either limit
// changed, hiding any buffered bytes that lie beyond the closest limit.
inline void CodedInputStream::RecomputeBufferLimits() {
  // First undo any previous truncation, then re-apply against the new limit.
  buffer_end_ += buffer_size_after_limit_;
  int closest_limit = std::min(current_limit_, total_bytes_limit_);
  if (closest_limit < total_bytes_read_) {
    // The limit position is in the current buffer.  We must adjust
    // the buffer size accordingly.
    buffer_size_after_limit_ = total_bytes_read_ - closest_limit;
    buffer_end_ -= buffer_size_after_limit_;
  } else {
    buffer_size_after_limit_ = 0;
  }
}
|
114 |
+
|
115 |
+
// Installs a new read limit |byte_limit| bytes past the current position and
// returns the previous limit so the caller can restore it with PopLimit().
CodedInputStream::Limit CodedInputStream::PushLimit(int byte_limit) {
  // Current position relative to the beginning of the stream.
  int current_position = CurrentPosition();

  Limit old_limit = current_limit_;

  // security: byte_limit is possibly evil, so check for negative values
  // and overflow. Also check that the new requested limit is before the
  // previous limit; otherwise we continue to enforce the previous limit.
  if (GOOGLE_PREDICT_TRUE(byte_limit >= 0 &&
                          byte_limit <= INT_MAX - current_position &&
                          byte_limit < current_limit_ - current_position)) {
    current_limit_ = current_position + byte_limit;
    RecomputeBufferLimits();
  }

  return old_limit;
}
|
133 |
+
|
134 |
+
// Restores the limit that was in force before the matching PushLimit().
void CodedInputStream::PopLimit(Limit limit) {
  // The limit passed in is actually the *old* limit, which we returned from
  // PushLimit().
  current_limit_ = limit;
  RecomputeBufferLimits();

  // We may no longer be at a legitimate message end. ReadTag() needs to be
  // called again to find out.
  legitimate_message_end_ = false;
}
|
144 |
+
|
145 |
+
std::pair<CodedInputStream::Limit, int>
|
146 |
+
CodedInputStream::IncrementRecursionDepthAndPushLimit(int byte_limit) {
|
147 |
+
return std::make_pair(PushLimit(byte_limit), --recursion_budget_);
|
148 |
+
}
|
149 |
+
|
150 |
+
// Reads a length prefix and pushes it as a limit.  A failed read pushes a
// zero-byte limit, which forces subsequent reads to stop immediately.
CodedInputStream::Limit CodedInputStream::ReadLengthAndPushLimit() {
  uint32 length = 0;
  if (!ReadVarint32(&length)) {
    length = 0;
  }
  return PushLimit(length);
}
|
154 |
+
|
155 |
+
// Leaving a nested message: query consumption BEFORE popping the limit
// (PopLimit() clears the message-end flag), then refund recursion budget.
bool CodedInputStream::DecrementRecursionDepthAndPopLimit(Limit limit) {
  const bool fully_consumed = ConsumedEntireMessage();
  PopLimit(limit);
  GOOGLE_DCHECK_LT(recursion_budget_, recursion_limit_);
  ++recursion_budget_;
  return fully_consumed;
}
|
162 |
+
|
163 |
+
// Like DecrementRecursionDepthAndPopLimit() but without touching the
// recursion budget.  Consumption must be queried before the pop.
bool CodedInputStream::CheckEntireMessageConsumedAndPopLimit(Limit limit) {
  const bool fully_consumed = ConsumedEntireMessage();
  PopLimit(limit);
  return fully_consumed;
}
|
168 |
+
|
169 |
+
int CodedInputStream::BytesUntilLimit() const {
|
170 |
+
if (current_limit_ == INT_MAX) return -1;
|
171 |
+
int current_position = CurrentPosition();
|
172 |
+
|
173 |
+
return current_limit_ - current_position;
|
174 |
+
}
|
175 |
+
|
176 |
+
void CodedInputStream::SetTotalBytesLimit(int total_bytes_limit) {
|
177 |
+
// Make sure the limit isn't already past, since this could confuse other
|
178 |
+
// code.
|
179 |
+
int current_position = CurrentPosition();
|
180 |
+
total_bytes_limit_ = std::max(current_position, total_bytes_limit);
|
181 |
+
RecomputeBufferLimits();
|
182 |
+
}
|
183 |
+
|
184 |
+
int CodedInputStream::BytesUntilTotalBytesLimit() const {
|
185 |
+
if (total_bytes_limit_ == INT_MAX) return -1;
|
186 |
+
return total_bytes_limit_ - CurrentPosition();
|
187 |
+
}
|
188 |
+
|
189 |
+
// Emits the standard "message too big" diagnostic.  Kept out of line so hot
// parsing paths stay small.
void CodedInputStream::PrintTotalBytesLimitError() {
  GOOGLE_LOG(ERROR) << "A protocol message was rejected because it was too "
                "big (more than " << total_bytes_limit_
             << " bytes). To increase the limit (or to disable these "
                "warnings), see CodedInputStream::SetTotalBytesLimit() "
                "in google/protobuf/io/coded_stream.h.";
}
|
196 |
+
|
197 |
+
// Slow path of Skip(): |count| extends past the current buffer.
// |original_buffer_size| is how many buffered bytes the fast path already
// accounted for.  Returns false when a limit or end-of-stream is hit.
bool CodedInputStream::SkipFallback(int count, int original_buffer_size) {
  if (buffer_size_after_limit_ > 0) {
    // We hit a limit inside this buffer. Advance to the limit and fail.
    Advance(original_buffer_size);
    return false;
  }

  count -= original_buffer_size;
  buffer_ = NULL;
  buffer_end_ = buffer_;

  // Make sure this skip doesn't try to skip past the current limit.
  int closest_limit = std::min(current_limit_, total_bytes_limit_);
  int bytes_until_limit = closest_limit - total_bytes_read_;
  if (bytes_until_limit < count) {
    // We hit the limit. Skip up to it then fail.
    if (bytes_until_limit > 0) {
      total_bytes_read_ = closest_limit;
      input_->Skip(bytes_until_limit);
    }
    return false;
  }

  if (!input_->Skip(count)) {
    // The underlying stream ended early; resync our byte count with it.
    total_bytes_read_ = input_->ByteCount();
    return false;
  }
  total_bytes_read_ += count;
  return true;
}
|
227 |
+
|
228 |
+
// Exposes the internal buffer directly, refilling first if it is empty.
// Fails only when the underlying stream is exhausted.
bool CodedInputStream::GetDirectBufferPointer(const void** data, int* size) {
  if (BufferSize() == 0 && !Refresh()) return false;

  *data = buffer_;
  *size = BufferSize();
  return true;
}
|
235 |
+
|
236 |
+
// Out-of-line wrapper; the real work is in the inlined helper so other call
// sites in this file can inline it.
bool CodedInputStream::ReadRaw(void* buffer, int size) {
  return InternalReadRawInline(buffer, size);
}
|
239 |
+
|
240 |
+
// Reads |size| bytes into *buffer, rejecting negative sizes up front.
bool CodedInputStream::ReadString(string* buffer, int size) {
  // security: size is often user-supplied
  if (size < 0) {
    return false;
  }
  return InternalReadStringInline(buffer, size);
}
|
244 |
+
|
245 |
+
// Slow path of ReadString(): the requested |size| may span multiple
// underlying buffers.  Appends exactly |size| bytes to *buffer, or returns
// false if the stream ends first.
bool CodedInputStream::ReadStringFallback(string* buffer, int size) {
  if (!buffer->empty()) {
    buffer->clear();
  }

  // Reserve up front only when |size| is known to fit inside the active
  // limit -- otherwise a hostile length prefix could force a huge allocation.
  int closest_limit = std::min(current_limit_, total_bytes_limit_);
  if (closest_limit != INT_MAX) {
    int bytes_to_limit = closest_limit - CurrentPosition();
    if (bytes_to_limit > 0 && size > 0 && size <= bytes_to_limit) {
      buffer->reserve(size);
    }
  }

  int current_buffer_size;
  while ((current_buffer_size = BufferSize()) < size) {
    // Some STL implementations "helpfully" crash on buffer->append(NULL, 0).
    if (current_buffer_size != 0) {
      // Note: string1.append(string2) is O(string2.size()) (as opposed to
      // O(string1.size() + string2.size()), which would be bad).
      buffer->append(reinterpret_cast<const char*>(buffer_),
                     current_buffer_size);
    }
    size -= current_buffer_size;
    Advance(current_buffer_size);
    if (!Refresh()) return false;
  }

  buffer->append(reinterpret_cast<const char*>(buffer_), size);
  Advance(size);

  return true;
}
|
277 |
+
|
278 |
+
|
279 |
+
// Reads a 32-bit little-endian value.  Fast path reads straight from the
// buffer; slow path reassembles a value that straddles a buffer boundary.
bool CodedInputStream::ReadLittleEndian32Fallback(uint32* value) {
  uint8 scratch[sizeof(*value)];

  const uint8* src;
  if (BufferSize() >= sizeof(*value)) {
    // Fast path: the whole fixed-width value is already buffered.
    src = buffer_;
    Advance(sizeof(*value));
  } else {
    // Slow path: gather the bytes across the boundary first.
    if (!ReadRaw(scratch, sizeof(*value))) return false;
    src = scratch;
  }
  ReadLittleEndian32FromArray(src, value);
  return true;
}
|
295 |
+
|
296 |
+
// 64-bit analogue of ReadLittleEndian32Fallback().
bool CodedInputStream::ReadLittleEndian64Fallback(uint64* value) {
  uint8 scratch[sizeof(*value)];

  const uint8* src;
  if (BufferSize() >= sizeof(*value)) {
    // Fast path: the whole fixed-width value is already buffered.
    src = buffer_;
    Advance(sizeof(*value));
  } else {
    // Slow path: gather the bytes across the boundary first.
    if (!ReadRaw(scratch, sizeof(*value))) return false;
    src = scratch;
  }
  ReadLittleEndian64FromArray(src, value);
  return true;
}
|
312 |
+
|
313 |
+
namespace {
|
314 |
+
|
315 |
+
// Read a varint from the given buffer, write it to *value, and return a pair.
|
316 |
+
// The first part of the pair is true iff the read was successful. The second
|
317 |
+
// part is buffer + (number of bytes read). This function is always inlined,
|
318 |
+
// so returning a pair is costless.
|
319 |
+
// Read a varint from the given buffer, write it to *value, and return a pair.
// The first part of the pair is true iff the read was successful.  The second
// part is buffer + (number of bytes read).  This function is always inlined,
// so returning a pair is costless.
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
::std::pair<bool, const uint8*> ReadVarint32FromArray(
    uint32 first_byte, const uint8* buffer,
    uint32* value);
inline ::std::pair<bool, const uint8*> ReadVarint32FromArray(
    uint32 first_byte, const uint8* buffer, uint32* value) {
  // Fast path: We have enough bytes left in the buffer to guarantee that
  // this read won't cross the end, so we can skip the checks.
  GOOGLE_DCHECK_EQ(*buffer, first_byte);
  GOOGLE_DCHECK_EQ(first_byte & 0x80, 0x80) << first_byte;
  const uint8* ptr = buffer;
  uint32 b;
  // Each step adds the next 7-bit group; once the following byte proves to be
  // present, the previous continuation bit's contribution is subtracted.
  uint32 result = first_byte - 0x80;
  ++ptr;  // We just processed the first byte.  Move on to the second.
  b = *(ptr++); result += b << 7; if (!(b & 0x80)) goto done;
  result -= 0x80 << 7;
  b = *(ptr++); result += b << 14; if (!(b & 0x80)) goto done;
  result -= 0x80 << 14;
  b = *(ptr++); result += b << 21; if (!(b & 0x80)) goto done;
  result -= 0x80 << 21;
  b = *(ptr++); result += b << 28; if (!(b & 0x80)) goto done;
  // "result -= 0x80 << 28" is irrelevant (it would shift out of 32 bits).

  // If the input is larger than 32 bits, we still need to read it all
  // and discard the high-order bits.
  for (int i = 0; i < kMaxVarintBytes - kMaxVarint32Bytes; i++) {
    b = *(ptr++); if (!(b & 0x80)) goto done;
  }

  // We have overrun the maximum size of a varint (10 bytes).  Assume
  // the data is corrupt.
  return std::make_pair(false, ptr);

 done:
  *value = result;
  return std::make_pair(true, ptr);
}
|
356 |
+
|
357 |
+
// Decodes a varint of up to 10 bytes from |buffer| into *value.  Mirrors
// ReadVarint32FromArray: first of the returned pair is success, second is the
// new buffer position.
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE::std::pair<bool, const uint8*>
ReadVarint64FromArray(const uint8* buffer, uint64* value);
inline ::std::pair<bool, const uint8*> ReadVarint64FromArray(
    const uint8* buffer, uint64* value) {
  const uint8* ptr = buffer;
  uint32 b;

  // Splitting into 32-bit pieces gives better performance on 32-bit
  // processors.
  uint32 part0 = 0, part1 = 0, part2 = 0;

  b = *(ptr++); part0 = b ; if (!(b & 0x80)) goto done;
  part0 -= 0x80;
  b = *(ptr++); part0 += b << 7; if (!(b & 0x80)) goto done;
  part0 -= 0x80 << 7;
  b = *(ptr++); part0 += b << 14; if (!(b & 0x80)) goto done;
  part0 -= 0x80 << 14;
  b = *(ptr++); part0 += b << 21; if (!(b & 0x80)) goto done;
  part0 -= 0x80 << 21;
  b = *(ptr++); part1 = b ; if (!(b & 0x80)) goto done;
  part1 -= 0x80;
  b = *(ptr++); part1 += b << 7; if (!(b & 0x80)) goto done;
  part1 -= 0x80 << 7;
  b = *(ptr++); part1 += b << 14; if (!(b & 0x80)) goto done;
  part1 -= 0x80 << 14;
  b = *(ptr++); part1 += b << 21; if (!(b & 0x80)) goto done;
  part1 -= 0x80 << 21;
  b = *(ptr++); part2 = b ; if (!(b & 0x80)) goto done;
  part2 -= 0x80;
  b = *(ptr++); part2 += b << 7; if (!(b & 0x80)) goto done;
  // "part2 -= 0x80 << 7" is irrelevant because (0x80 << 7) << 56 is 0.

  // We have overrun the maximum size of a varint (10 bytes).  Assume
  // the data is corrupt.
  return std::make_pair(false, ptr);

 done:
  // Reassemble the 28 + 28 + 8 bit pieces into the 64-bit result.
  *value = (static_cast<uint64>(part0)) |
           (static_cast<uint64>(part1) << 28) |
           (static_cast<uint64>(part2) << 56);
  return std::make_pair(true, ptr);
}
|
399 |
+
|
400 |
+
} // namespace
|
401 |
+
|
402 |
+
// Delegates to the 64-bit fallback (the one-byte fast path was already tried
// by our caller) and truncates the result to 32 bits.
bool CodedInputStream::ReadVarint32Slow(uint32* value) {
  const std::pair<uint64, bool> parsed = ReadVarint64Fallback();
  *value = static_cast<uint32>(parsed.first);
  return parsed.second;
}
|
409 |
+
|
410 |
+
// Out-of-line continuation of ReadVarint32() for multi-byte varints.
// Returns the decoded value (>= 0) on success, or -1 on parse failure.
int64 CodedInputStream::ReadVarint32Fallback(uint32 first_byte_or_zero) {
  if (BufferSize() >= kMaxVarintBytes ||
      // Optimization: We're also safe if the buffer is non-empty and it ends
      // with a byte that would terminate a varint.
      (buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) {
    GOOGLE_DCHECK_NE(first_byte_or_zero, 0)
        << "Caller should provide us with *buffer_ when buffer is non-empty";
    uint32 temp;
    ::std::pair<bool, const uint8*> p =
        ReadVarint32FromArray(first_byte_or_zero, buffer_, &temp);
    if (!p.first) return -1;
    buffer_ = p.second;
    return temp;
  } else {
    // Really slow case: we will incur the cost of an extra function call here,
    // but moving this out of line reduces the size of this function, which
    // improves the common case. In micro benchmarks, this is worth about 10-15%
    uint32 temp;
    return ReadVarint32Slow(&temp) ? static_cast<int64>(temp) : -1;
  }
}
|
431 |
+
|
432 |
+
int CodedInputStream::ReadVarintSizeAsIntSlow() {
|
433 |
+
// Directly invoke ReadVarint64Fallback, since we already tried to optimize
|
434 |
+
// for one-byte varints.
|
435 |
+
std::pair<uint64, bool> p = ReadVarint64Fallback();
|
436 |
+
if (!p.second || p.first > static_cast<uint64>(INT_MAX)) return -1;
|
437 |
+
return p.first;
|
438 |
+
}
|
439 |
+
|
440 |
+
int CodedInputStream::ReadVarintSizeAsIntFallback() {
|
441 |
+
if (BufferSize() >= kMaxVarintBytes ||
|
442 |
+
// Optimization: We're also safe if the buffer is non-empty and it ends
|
443 |
+
// with a byte that would terminate a varint.
|
444 |
+
(buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) {
|
445 |
+
uint64 temp;
|
446 |
+
::std::pair<bool, const uint8*> p = ReadVarint64FromArray(buffer_, &temp);
|
447 |
+
if (!p.first || temp > static_cast<uint64>(INT_MAX)) return -1;
|
448 |
+
buffer_ = p.second;
|
449 |
+
return temp;
|
450 |
+
} else {
|
451 |
+
// Really slow case: we will incur the cost of an extra function call here,
|
452 |
+
// but moving this out of line reduces the size of this function, which
|
453 |
+
// improves the common case. In micro benchmarks, this is worth about 10-15%
|
454 |
+
return ReadVarintSizeAsIntSlow();
|
455 |
+
}
|
456 |
+
}
|
457 |
+
|
458 |
+
// Slow-path tag read, used when the fast in-buffer path cannot be taken
// (buffer empty, or the varint may straddle a buffer boundary).  Returns the
// tag, or 0 on failure / end of stream; legitimate_message_end_ records
// whether returning 0 corresponds to a valid place to end a message.
uint32 CodedInputStream::ReadTagSlow() {
  if (buffer_ == buffer_end_) {
    // Call refresh.
    if (!Refresh()) {
      // Refresh failed. Make sure that it failed due to EOF, not because
      // we hit total_bytes_limit_, which, unlike normal limits, is not a
      // valid place to end a message.
      int current_position = total_bytes_read_ - buffer_size_after_limit_;
      if (current_position >= total_bytes_limit_) {
        // Hit total_bytes_limit_. But if we also hit the normal limit,
        // we're still OK.
        legitimate_message_end_ = current_limit_ == total_bytes_limit_;
      } else {
        legitimate_message_end_ = true;
      }
      return 0;
    }
  }

  // For the slow path, just do a 64-bit read. Try to optimize for one-byte tags
  // again, since we have now refreshed the buffer.
  uint64 result = 0;
  if (!ReadVarint64(&result)) return 0;
  // NOTE: tags always fit in 32 bits, so the narrowing cast is safe for
  // well-formed input.
  return static_cast<uint32>(result);
}
|
483 |
+
|
484 |
+
// Fallback for ReadTag when the inline one-byte path failed.
// first_byte_or_zero is *buffer_ if the buffer is non-empty, else 0.
// Returns the tag, or 0 on failure / limit reached.
uint32 CodedInputStream::ReadTagFallback(uint32 first_byte_or_zero) {
  const int buf_size = BufferSize();
  if (buf_size >= kMaxVarintBytes ||
      // Optimization: We're also safe if the buffer is non-empty and it ends
      // with a byte that would terminate a varint.
      (buf_size > 0 && !(buffer_end_[-1] & 0x80))) {
    GOOGLE_DCHECK_EQ(first_byte_or_zero, buffer_[0]);
    if (first_byte_or_zero == 0) {
      // A literal zero byte is an invalid tag; consume it and fail.
      ++buffer_;
      return 0;
    }
    uint32 tag;
    ::std::pair<bool, const uint8*> p =
        ReadVarint32FromArray(first_byte_or_zero, buffer_, &tag);
    if (!p.first) {
      return 0;
    }
    buffer_ = p.second;
    return tag;
  } else {
    // We are commonly at a limit when attempting to read tags. Try to quickly
    // detect this case without making another function call.
    if ((buf_size == 0) &&
        ((buffer_size_after_limit_ > 0) ||
         (total_bytes_read_ == current_limit_)) &&
        // Make sure that the limit we hit is not total_bytes_limit_, since
        // in that case we still need to call Refresh() so that it prints an
        // error.
        total_bytes_read_ - buffer_size_after_limit_ < total_bytes_limit_) {
      // We hit a byte limit.
      legitimate_message_end_ = true;
      return 0;
    }
    return ReadTagSlow();
  }
}
|
520 |
+
|
521 |
+
// Slow path: This read might cross the end of the buffer, so we need to
// check for (and perform) a Refresh() whenever the buffer runs dry.  Reads
// one continuation byte per iteration, rejecting varints longer than the
// 10-byte maximum.  On failure *value is set to 0 and false is returned.
bool CodedInputStream::ReadVarint64Slow(uint64* value) {
  uint64 accumulator = 0;
  for (int byte_index = 0;; ++byte_index) {
    if (byte_index == kMaxVarintBytes) {
      // We have overrun the maximum size of a varint; the data is corrupt.
      *value = 0;
      return false;
    }
    while (buffer_ == buffer_end_) {
      if (!Refresh()) {
        *value = 0;
        return false;
      }
    }
    const uint32 current = *buffer_;
    accumulator |= static_cast<uint64>(current & 0x7F) << (7 * byte_index);
    Advance(1);
    if (!(current & 0x80)) break;  // high bit clear => last byte
  }
  *value = accumulator;
  return true;
}
|
549 |
+
|
550 |
+
std::pair<uint64, bool> CodedInputStream::ReadVarint64Fallback() {
|
551 |
+
if (BufferSize() >= kMaxVarintBytes ||
|
552 |
+
// Optimization: We're also safe if the buffer is non-empty and it ends
|
553 |
+
// with a byte that would terminate a varint.
|
554 |
+
(buffer_end_ > buffer_ && !(buffer_end_[-1] & 0x80))) {
|
555 |
+
uint64 temp;
|
556 |
+
::std::pair<bool, const uint8*> p = ReadVarint64FromArray(buffer_, &temp);
|
557 |
+
if (!p.first) {
|
558 |
+
return std::make_pair(0, false);
|
559 |
+
}
|
560 |
+
buffer_ = p.second;
|
561 |
+
return std::make_pair(temp, true);
|
562 |
+
} else {
|
563 |
+
uint64 temp;
|
564 |
+
bool success = ReadVarint64Slow(&temp);
|
565 |
+
return std::make_pair(temp, success);
|
566 |
+
}
|
567 |
+
}
|
568 |
+
|
569 |
+
// Fetches the next chunk from the underlying ZeroCopyInputStream into
// buffer_/buffer_end_.  Returns false when a limit has been reached or the
// stream is exhausted.  Must only be called when the current buffer is empty.
bool CodedInputStream::Refresh() {
  GOOGLE_DCHECK_EQ(0, BufferSize());

  if (buffer_size_after_limit_ > 0 || overflow_bytes_ > 0 ||
      total_bytes_read_ == current_limit_) {
    // We've hit a limit. Stop.
    int current_position = total_bytes_read_ - buffer_size_after_limit_;

    if (current_position >= total_bytes_limit_ &&
        total_bytes_limit_ != current_limit_) {
      // Hit total_bytes_limit_.
      PrintTotalBytesLimitError();
    }

    return false;
  }

  const void* void_buffer;
  int buffer_size;
  if (NextNonEmpty(input_, &void_buffer, &buffer_size)) {
    buffer_ = reinterpret_cast<const uint8*>(void_buffer);
    buffer_end_ = buffer_ + buffer_size;
    GOOGLE_CHECK_GE(buffer_size, 0);

    if (total_bytes_read_ <= INT_MAX - buffer_size) {
      total_bytes_read_ += buffer_size;
    } else {
      // Overflow. Reset buffer_end_ to not include the bytes beyond INT_MAX.
      // We can't get that far anyway, because total_bytes_limit_ is guaranteed
      // to be less than it. We need to keep track of the number of bytes
      // we discarded, though, so that we can call input_->BackUp() to back
      // up over them on destruction.

      // The following line is equivalent to:
      //   overflow_bytes_ = total_bytes_read_ + buffer_size - INT_MAX;
      // except that it avoids overflows. Signed integer overflow has
      // undefined results according to the C standard.
      overflow_bytes_ = total_bytes_read_ - (INT_MAX - buffer_size);
      buffer_end_ -= overflow_bytes_;
      total_bytes_read_ = INT_MAX;
    }

    RecomputeBufferLimits();
    return true;
  } else {
    // Underlying stream exhausted: leave the buffer empty.
    buffer_ = NULL;
    buffer_end_ = NULL;
    return false;
  }
}
|
619 |
+
|
620 |
+
// CodedOutputStream =================================================
|
621 |
+
|
622 |
+
std::atomic<bool> CodedOutputStream::default_serialization_deterministic_{
|
623 |
+
false};
|
624 |
+
|
625 |
+
CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output)
|
626 |
+
: CodedOutputStream(output, true) {}
|
627 |
+
|
628 |
+
// Constructs a CodedOutputStream writing to `output`.  When do_eager_refresh
// is true the first buffer chunk is acquired immediately so that writes can
// proceed without an initial Refresh() check.
CodedOutputStream::CodedOutputStream(ZeroCopyOutputStream* output,
                                     bool do_eager_refresh)
    : output_(output),
      buffer_(NULL),
      buffer_size_(0),
      total_bytes_(0),
      had_error_(false),
      aliasing_enabled_(false),
      is_serialization_deterministic_(IsDefaultSerializationDeterministic()) {
  if (do_eager_refresh) {
    // Eagerly Refresh() so buffer space is immediately available.
    Refresh();
    // The Refresh() may have failed. If the client doesn't write any data,
    // though, don't consider this an error. If the client does write data, then
    // another Refresh() will be attempted and it will set the error once again.
    had_error_ = false;
  }
}
|
646 |
+
|
647 |
+
// Destructor backs up over any unused buffer space so the underlying stream
// only keeps the bytes actually written.
CodedOutputStream::~CodedOutputStream() {
  Trim();
}
|
650 |
+
|
651 |
+
void CodedOutputStream::Trim() {
|
652 |
+
if (buffer_size_ > 0) {
|
653 |
+
output_->BackUp(buffer_size_);
|
654 |
+
total_bytes_ -= buffer_size_;
|
655 |
+
buffer_size_ = 0;
|
656 |
+
buffer_ = NULL;
|
657 |
+
}
|
658 |
+
}
|
659 |
+
|
660 |
+
bool CodedOutputStream::Skip(int count) {
|
661 |
+
if (count < 0) return false;
|
662 |
+
|
663 |
+
while (count > buffer_size_) {
|
664 |
+
count -= buffer_size_;
|
665 |
+
if (!Refresh()) return false;
|
666 |
+
}
|
667 |
+
|
668 |
+
Advance(count);
|
669 |
+
return true;
|
670 |
+
}
|
671 |
+
|
672 |
+
bool CodedOutputStream::GetDirectBufferPointer(void** data, int* size) {
|
673 |
+
if (buffer_size_ == 0 && !Refresh()) return false;
|
674 |
+
|
675 |
+
*data = buffer_;
|
676 |
+
*size = buffer_size_;
|
677 |
+
return true;
|
678 |
+
}
|
679 |
+
|
680 |
+
void CodedOutputStream::WriteRaw(const void* data, int size) {
|
681 |
+
while (buffer_size_ < size) {
|
682 |
+
memcpy(buffer_, data, buffer_size_);
|
683 |
+
size -= buffer_size_;
|
684 |
+
data = reinterpret_cast<const uint8*>(data) + buffer_size_;
|
685 |
+
if (!Refresh()) return;
|
686 |
+
}
|
687 |
+
|
688 |
+
memcpy(buffer_, data, size);
|
689 |
+
Advance(size);
|
690 |
+
}
|
691 |
+
|
692 |
+
// Copies `size` bytes into `target` and returns the position one past the
// copied data.  The caller guarantees the target array is large enough.
uint8* CodedOutputStream::WriteRawToArray(
    const void* data, int size, uint8* target) {
  memcpy(target, data, size);
  return target + size;
}
|
697 |
+
|
698 |
+
|
699 |
+
void CodedOutputStream::WriteAliasedRaw(const void* data, int size) {
|
700 |
+
if (size < buffer_size_
|
701 |
+
) {
|
702 |
+
WriteRaw(data, size);
|
703 |
+
} else {
|
704 |
+
Trim();
|
705 |
+
|
706 |
+
total_bytes_ += size;
|
707 |
+
had_error_ |= !output_->WriteAliasedRaw(data, size);
|
708 |
+
}
|
709 |
+
}
|
710 |
+
|
711 |
+
// Writes a 32-bit value in little-endian byte order.  Encodes directly into
// the output buffer when 4 bytes are available, otherwise stages the bytes
// on the stack and routes them through WriteRaw().
void CodedOutputStream::WriteLittleEndian32(uint32 value) {
  if (buffer_size_ >= sizeof(value)) {
    WriteLittleEndian32ToArray(value, buffer_);
    Advance(sizeof(value));
  } else {
    uint8 scratch[sizeof(value)];
    WriteLittleEndian32ToArray(value, scratch);
    WriteRaw(scratch, sizeof(value));
  }
}
|
725 |
+
|
726 |
+
// Writes a 64-bit value in little-endian byte order.  Mirrors
// WriteLittleEndian32: in-place when 8 bytes fit, staged otherwise.
void CodedOutputStream::WriteLittleEndian64(uint64 value) {
  if (buffer_size_ >= sizeof(value)) {
    WriteLittleEndian64ToArray(value, buffer_);
    Advance(sizeof(value));
  } else {
    uint8 scratch[sizeof(value)];
    WriteLittleEndian64ToArray(value, scratch);
    WriteRaw(scratch, sizeof(value));
  }
}
|
740 |
+
|
741 |
+
// Slow path for WriteVarint32: the varint may not fit in the current
// buffer, so encode it into stack scratch space first and let WriteRaw()
// handle any buffer splitting.
void CodedOutputStream::WriteVarint32SlowPath(uint32 value) {
  uint8 scratch[kMaxVarint32Bytes];
  uint8* const begin = &scratch[0];
  uint8* const end = WriteVarint32ToArray(value, begin);
  WriteRaw(scratch, static_cast<int>(end - begin));
}
|
748 |
+
|
749 |
+
// Slow path for WriteVarint64; see WriteVarint32SlowPath.
void CodedOutputStream::WriteVarint64SlowPath(uint64 value) {
  uint8 scratch[kMaxVarintBytes];
  uint8* const begin = &scratch[0];
  uint8* const end = WriteVarint64ToArray(value, begin);
  WriteRaw(scratch, static_cast<int>(end - begin));
}
|
756 |
+
|
757 |
+
bool CodedOutputStream::Refresh() {
|
758 |
+
void* void_buffer;
|
759 |
+
if (output_->Next(&void_buffer, &buffer_size_)) {
|
760 |
+
buffer_ = reinterpret_cast<uint8*>(void_buffer);
|
761 |
+
total_bytes_ += buffer_size_;
|
762 |
+
return true;
|
763 |
+
} else {
|
764 |
+
buffer_ = NULL;
|
765 |
+
buffer_size_ = 0;
|
766 |
+
had_error_ = true;
|
767 |
+
return false;
|
768 |
+
}
|
769 |
+
}
|
770 |
+
|
771 |
+
// Writes a length-prefixed string to `target`: first the size as a varint,
// then the string bytes.  Returns the position one past the written data.
uint8* CodedOutputStream::WriteStringWithSizeToArray(const string& str,
                                                     uint8* target) {
  GOOGLE_DCHECK_LE(str.size(), kuint32max);
  uint8* after_size = WriteVarint32ToArray(str.size(), target);
  return WriteStringToArray(str, after_size);
}
|
777 |
+
|
778 |
+
} // namespace io
|
779 |
+
} // namespace protobuf
|
780 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/common.cc
ADDED
@@ -0,0 +1,389 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
|
33 |
+
#include <google/protobuf/message_lite.h> // TODO(gerbens) ideally remove this.
|
34 |
+
#include <google/protobuf/stubs/common.h>
|
35 |
+
#include <google/protobuf/stubs/once.h>
|
36 |
+
#include <google/protobuf/stubs/status.h>
|
37 |
+
#include <google/protobuf/stubs/stringpiece.h>
|
38 |
+
#include <google/protobuf/stubs/strutil.h>
|
39 |
+
#include <google/protobuf/stubs/int128.h>
|
40 |
+
#include <errno.h>
|
41 |
+
#include <sstream>
|
42 |
+
#include <stdio.h>
|
43 |
+
#include <vector>
|
44 |
+
|
45 |
+
#ifdef _WIN32
|
46 |
+
#define WIN32_LEAN_AND_MEAN // We only need minimal includes
|
47 |
+
#include <windows.h>
|
48 |
+
#define snprintf _snprintf // see comment in strutil.cc
|
49 |
+
#elif defined(HAVE_PTHREAD)
|
50 |
+
#include <pthread.h>
|
51 |
+
#else
|
52 |
+
#error "No suitable threading library available."
|
53 |
+
#endif
|
54 |
+
#if defined(__ANDROID__)
|
55 |
+
#include <android/log.h>
|
56 |
+
#endif
|
57 |
+
|
58 |
+
namespace google {
|
59 |
+
namespace protobuf {
|
60 |
+
|
61 |
+
namespace internal {
|
62 |
+
|
63 |
+
// Aborts the program (via GOOGLE_LOG(FATAL)) when the generated-code headers
// and the linked runtime library come from incompatible protobuf versions,
// in either direction.  `filename` identifies the generated file whose
// verification failed, for the error message.
void VerifyVersion(int headerVersion,
                   int minLibraryVersion,
                   const char* filename) {
  if (GOOGLE_PROTOBUF_VERSION < minLibraryVersion) {
    // Library is too old for headers.
    GOOGLE_LOG(FATAL)
      << "This program requires version " << VersionString(minLibraryVersion)
      << " of the Protocol Buffer runtime library, but the installed version "
         "is " << VersionString(GOOGLE_PROTOBUF_VERSION) << ". Please update "
         "your library. If you compiled the program yourself, make sure that "
         "your headers are from the same version of Protocol Buffers as your "
         "link-time library. (Version verification failed in \""
      << filename << "\".)";
  }
  if (headerVersion < kMinHeaderVersionForLibrary) {
    // Headers are too old for library.
    GOOGLE_LOG(FATAL)
      << "This program was compiled against version "
      << VersionString(headerVersion) << " of the Protocol Buffer runtime "
         "library, which is not compatible with the installed version ("
      << VersionString(GOOGLE_PROTOBUF_VERSION) << "). Contact the program "
         "author for an update. If you compiled the program yourself, make "
         "sure that your headers are from the same version of Protocol Buffers "
         "as your link-time library. (Version verification failed in \""
      << filename << "\".)";
  }
}
|
90 |
+
|
91 |
+
// Renders a packed protobuf version number (major*1e6 + minor*1e3 + micro)
// as the human-readable "major.minor.micro" form.
string VersionString(int version) {
  const int major = version / 1000000;
  const int minor = (version / 1000) % 1000;
  const int micro = version % 1000;

  // 128 bytes should always be enough, but we use snprintf() anyway to be
  // safe.
  char buffer[128];
  snprintf(buffer, sizeof(buffer), "%d.%d.%d", major, minor, micro);

  // Guard against broken MSVC snprintf().
  buffer[sizeof(buffer) - 1] = '\0';

  return buffer;
}
|
106 |
+
|
107 |
+
} // namespace internal
|
108 |
+
|
109 |
+
// ===================================================================
|
110 |
+
// emulates google3/base/logging.cc
|
111 |
+
|
112 |
+
// If the minimum logging level is not set, we default to logging messages for
|
113 |
+
// all levels.
|
114 |
+
#ifndef GOOGLE_PROTOBUF_MIN_LOG_LEVEL
|
115 |
+
#define GOOGLE_PROTOBUF_MIN_LOG_LEVEL LOGLEVEL_INFO
|
116 |
+
#endif
|
117 |
+
|
118 |
+
namespace internal {
|
119 |
+
|
120 |
+
// Default log handler.  On Android, messages go to both logcat and stderr;
// elsewhere they go to stderr only.  Messages below
// GOOGLE_PROTOBUF_MIN_LOG_LEVEL are dropped.
#if defined(__ANDROID__)
inline void DefaultLogHandler(LogLevel level, const char* filename, int line,
                              const string& message) {
  if (level < GOOGLE_PROTOBUF_MIN_LOG_LEVEL) {
    return;
  }
  static const char* level_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};

  // Map protobuf LogLevel values (0..3) onto Android log priorities.
  static const int android_log_levels[] = {
      ANDROID_LOG_INFO,   // LOG(INFO),
      ANDROID_LOG_WARN,   // LOG(WARNING)
      ANDROID_LOG_ERROR,  // LOG(ERROR)
      ANDROID_LOG_FATAL,  // LOG(FATAL)
  };

  // Translate the logging level (indexes the table above; `level` is
  // presumably always in range 0..3 here — the enum defines exactly those).
  const int android_log_level = android_log_levels[level];
  ::std::ostringstream ostr;
  ostr << "[libprotobuf " << level_names[level] << " " << filename << ":"
       << line << "] " << message.c_str();

  // Output the log string the Android log at the appropriate level.
  __android_log_write(android_log_level, "libprotobuf-native",
                      ostr.str().c_str());
  // Also output to std::cerr.
  fprintf(stderr, "%s", ostr.str().c_str());
  fflush(stderr);

  // Indicate termination if needed.
  if (android_log_level == ANDROID_LOG_FATAL) {
    __android_log_write(ANDROID_LOG_FATAL, "libprotobuf-native",
                        "terminating.\n");
  }
}

#else
void DefaultLogHandler(LogLevel level, const char* filename, int line,
                       const string& message) {
  if (level < GOOGLE_PROTOBUF_MIN_LOG_LEVEL) {
    return;
  }
  static const char* level_names[] = { "INFO", "WARNING", "ERROR", "FATAL" };

  // We use fprintf() instead of cerr because we want this to work at static
  // initialization time.
  fprintf(stderr, "[libprotobuf %s %s:%d] %s\n",
          level_names[level], filename, line, message.c_str());
  fflush(stderr);  // Needed on MSVC.
}
#endif
|
170 |
+
|
171 |
+
void NullLogHandler(LogLevel /* level */, const char* /* filename */,
|
172 |
+
int /* line */, const string& /* message */) {
|
173 |
+
// Nothing.
|
174 |
+
}
|
175 |
+
|
176 |
+
static LogHandler* log_handler_ = &DefaultLogHandler;
|
177 |
+
static int log_silencer_count_ = 0;
|
178 |
+
|
179 |
+
static Mutex* log_silencer_count_mutex_ = NULL;
|
180 |
+
GOOGLE_PROTOBUF_DECLARE_ONCE(log_silencer_count_init_);
|
181 |
+
|
182 |
+
void DeleteLogSilencerCount() {
|
183 |
+
delete log_silencer_count_mutex_;
|
184 |
+
log_silencer_count_mutex_ = NULL;
|
185 |
+
}
|
186 |
+
void InitLogSilencerCount() {
|
187 |
+
log_silencer_count_mutex_ = new Mutex;
|
188 |
+
OnShutdown(&DeleteLogSilencerCount);
|
189 |
+
}
|
190 |
+
void InitLogSilencerCountOnce() {
|
191 |
+
GoogleOnceInit(&log_silencer_count_init_, &InitLogSilencerCount);
|
192 |
+
}
|
193 |
+
|
194 |
+
LogMessage& LogMessage::operator<<(const string& value) {
|
195 |
+
message_ += value;
|
196 |
+
return *this;
|
197 |
+
}
|
198 |
+
|
199 |
+
LogMessage& LogMessage::operator<<(const char* value) {
|
200 |
+
message_ += value;
|
201 |
+
return *this;
|
202 |
+
}
|
203 |
+
|
204 |
+
LogMessage& LogMessage::operator<<(const StringPiece& value) {
|
205 |
+
message_ += value.ToString();
|
206 |
+
return *this;
|
207 |
+
}
|
208 |
+
|
209 |
+
LogMessage& LogMessage::operator<<(
|
210 |
+
const ::google::protobuf::util::Status& status) {
|
211 |
+
message_ += status.ToString();
|
212 |
+
return *this;
|
213 |
+
}
|
214 |
+
|
215 |
+
LogMessage& LogMessage::operator<<(const uint128& value) {
|
216 |
+
std::ostringstream str;
|
217 |
+
str << value;
|
218 |
+
message_ += str.str();
|
219 |
+
return *this;
|
220 |
+
}
|
221 |
+
|
222 |
+
// Since this is just for logging, we don't care if the current locale changes
|
223 |
+
// the results -- in fact, we probably prefer that. So we use snprintf()
|
224 |
+
// instead of Simple*toa().
|
225 |
+
#undef DECLARE_STREAM_OPERATOR
|
226 |
+
#define DECLARE_STREAM_OPERATOR(TYPE, FORMAT) \
|
227 |
+
LogMessage& LogMessage::operator<<(TYPE value) { \
|
228 |
+
/* 128 bytes should be big enough for any of the primitive */ \
|
229 |
+
/* values which we print with this, but well use snprintf() */ \
|
230 |
+
/* anyway to be extra safe. */ \
|
231 |
+
char buffer[128]; \
|
232 |
+
snprintf(buffer, sizeof(buffer), FORMAT, value); \
|
233 |
+
/* Guard against broken MSVC snprintf(). */ \
|
234 |
+
buffer[sizeof(buffer)-1] = '\0'; \
|
235 |
+
message_ += buffer; \
|
236 |
+
return *this; \
|
237 |
+
}
|
238 |
+
|
239 |
+
DECLARE_STREAM_OPERATOR(char , "%c" )
|
240 |
+
DECLARE_STREAM_OPERATOR(int , "%d" )
|
241 |
+
DECLARE_STREAM_OPERATOR(unsigned int , "%u" )
|
242 |
+
DECLARE_STREAM_OPERATOR(long , "%ld")
|
243 |
+
DECLARE_STREAM_OPERATOR(unsigned long, "%lu")
|
244 |
+
DECLARE_STREAM_OPERATOR(double , "%g" )
|
245 |
+
DECLARE_STREAM_OPERATOR(void* , "%p" )
|
246 |
+
DECLARE_STREAM_OPERATOR(long long , "%" GOOGLE_LL_FORMAT "d")
|
247 |
+
DECLARE_STREAM_OPERATOR(unsigned long long, "%" GOOGLE_LL_FORMAT "u")
|
248 |
+
#undef DECLARE_STREAM_OPERATOR
|
249 |
+
|
250 |
+
LogMessage::LogMessage(LogLevel level, const char* filename, int line)
|
251 |
+
: level_(level), filename_(filename), line_(line) {}
|
252 |
+
LogMessage::~LogMessage() {}
|
253 |
+
|
254 |
+
void LogMessage::Finish() {
|
255 |
+
bool suppress = false;
|
256 |
+
|
257 |
+
if (level_ != LOGLEVEL_FATAL) {
|
258 |
+
InitLogSilencerCountOnce();
|
259 |
+
MutexLock lock(log_silencer_count_mutex_);
|
260 |
+
suppress = log_silencer_count_ > 0;
|
261 |
+
}
|
262 |
+
|
263 |
+
if (!suppress) {
|
264 |
+
log_handler_(level_, filename_, line_, message_);
|
265 |
+
}
|
266 |
+
|
267 |
+
if (level_ == LOGLEVEL_FATAL) {
|
268 |
+
#if PROTOBUF_USE_EXCEPTIONS
|
269 |
+
throw FatalException(filename_, line_, message_);
|
270 |
+
#else
|
271 |
+
abort();
|
272 |
+
#endif
|
273 |
+
}
|
274 |
+
}
|
275 |
+
|
276 |
+
void LogFinisher::operator=(LogMessage& other) {
|
277 |
+
other.Finish();
|
278 |
+
}
|
279 |
+
|
280 |
+
} // namespace internal
|
281 |
+
|
282 |
+
// Installs a new log handler and returns the previous one.  Passing NULL
// installs the null handler (silencing all output); the null handler is
// reported back to callers as NULL.
LogHandler* SetLogHandler(LogHandler* new_func) {
  LogHandler* previous = internal::log_handler_;
  if (previous == &internal::NullLogHandler) {
    previous = NULL;
  }
  internal::log_handler_ =
      (new_func == NULL) ? &internal::NullLogHandler : new_func;
  return previous;
}
|
294 |
+
|
295 |
+
LogSilencer::LogSilencer() {
|
296 |
+
internal::InitLogSilencerCountOnce();
|
297 |
+
MutexLock lock(internal::log_silencer_count_mutex_);
|
298 |
+
++internal::log_silencer_count_;
|
299 |
+
};
|
300 |
+
|
301 |
+
LogSilencer::~LogSilencer() {
|
302 |
+
internal::InitLogSilencerCountOnce();
|
303 |
+
MutexLock lock(internal::log_silencer_count_mutex_);
|
304 |
+
--internal::log_silencer_count_;
|
305 |
+
};
|
306 |
+
|
307 |
+
// ===================================================================
|
308 |
+
// emulates google3/base/callback.cc
|
309 |
+
|
310 |
+
Closure::~Closure() {}
|
311 |
+
|
312 |
+
namespace internal { FunctionClosure0::~FunctionClosure0() {} }
|
313 |
+
|
314 |
+
void DoNothing() {}
|
315 |
+
|
316 |
+
// ===================================================================
|
317 |
+
// emulates google3/util/endian/endian.h
|
318 |
+
//
|
319 |
+
// TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is unfortunately defined in
|
320 |
+
// google/protobuf/io/coded_stream.h and therefore can not be used here.
|
321 |
+
// Maybe move that macro definition here in the furture.
|
322 |
+
// Converts a 32-bit value to big-endian (network) byte order regardless of
// host endianness, by writing the bytes most-significant first through a
// union and reading the result back as an integer.
uint32 ghtonl(uint32 x) {
  union {
    uint32 packed;
    uint8 bytes[4];
  };
  for (int i = 0; i < 4; ++i) {
    // bytes[0] receives the most significant byte.
    bytes[i] = static_cast<uint8>((x >> (8 * (3 - i))) & 0xFF);
  }
  return packed;
}
|
333 |
+
|
334 |
+
// ===================================================================
|
335 |
+
// Shutdown support.
|
336 |
+
|
337 |
+
namespace internal {
|
338 |
+
|
339 |
+
typedef void OnShutdownFunc();
|
340 |
+
struct ShutdownData {
|
341 |
+
~ShutdownData() {
|
342 |
+
std::reverse(functions.begin(), functions.end());
|
343 |
+
for (auto pair : functions) pair.first(pair.second);
|
344 |
+
}
|
345 |
+
|
346 |
+
static ShutdownData* get() {
|
347 |
+
static auto* data = new ShutdownData;
|
348 |
+
return data;
|
349 |
+
}
|
350 |
+
|
351 |
+
std::vector<std::pair<void (*)(const void*), const void*>> functions;
|
352 |
+
Mutex mutex;
|
353 |
+
};
|
354 |
+
|
355 |
+
static void RunZeroArgFunc(const void* arg) {
|
356 |
+
reinterpret_cast<void (*)()>(const_cast<void*>(arg))();
|
357 |
+
}
|
358 |
+
|
359 |
+
void OnShutdown(void (*func)()) {
|
360 |
+
OnShutdownRun(RunZeroArgFunc, reinterpret_cast<void*>(func));
|
361 |
+
}
|
362 |
+
|
363 |
+
void OnShutdownRun(void (*f)(const void*), const void* arg) {
|
364 |
+
auto shutdown_data = ShutdownData::get();
|
365 |
+
MutexLock lock(&shutdown_data->mutex);
|
366 |
+
shutdown_data->functions.push_back(std::make_pair(f, arg));
|
367 |
+
}
|
368 |
+
|
369 |
+
} // namespace internal
|
370 |
+
|
371 |
+
void ShutdownProtobufLibrary() {
|
372 |
+
// This function should be called only once, but accepts multiple calls.
|
373 |
+
static bool is_shutdown = false;
|
374 |
+
if (!is_shutdown) {
|
375 |
+
delete internal::ShutdownData::get();
|
376 |
+
is_shutdown = true;
|
377 |
+
}
|
378 |
+
}
|
379 |
+
|
380 |
+
#if PROTOBUF_USE_EXCEPTIONS
|
381 |
+
FatalException::~FatalException() throw() {}
|
382 |
+
|
383 |
+
const char* FatalException::what() const throw() {
|
384 |
+
return message_.c_str();
|
385 |
+
}
|
386 |
+
#endif
|
387 |
+
|
388 |
+
} // namespace protobuf
|
389 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/extension_set.cc
ADDED
@@ -0,0 +1,1916 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
|
35 |
+
#include <google/protobuf/stubs/hash.h>
|
36 |
+
#include <tuple>
|
37 |
+
#include <utility>
|
38 |
+
#include <google/protobuf/stubs/common.h>
|
39 |
+
#include <google/protobuf/extension_set.h>
|
40 |
+
#include <google/protobuf/message_lite.h>
|
41 |
+
#include <google/protobuf/io/coded_stream.h>
|
42 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
43 |
+
#include <google/protobuf/repeated_field.h>
|
44 |
+
#include <google/protobuf/stubs/map_util.h>
|
45 |
+
|
46 |
+
namespace google {
|
47 |
+
namespace protobuf {
|
48 |
+
namespace internal {
|
49 |
+
|
50 |
+
namespace {
|
51 |
+
|
52 |
+
// Narrows the generic FieldType value to WireFormatLite::FieldType,
// DCHECKing that it lies in the valid (1..MAX_FIELD_TYPE) range.
inline WireFormatLite::FieldType real_type(FieldType type) {
  GOOGLE_DCHECK(type > 0 && type <= WireFormatLite::MAX_FIELD_TYPE);
  return static_cast<WireFormatLite::FieldType>(type);
}
|
56 |
+
|
57 |
+
// Maps a FieldType to the C++ type category used to store its values.
inline WireFormatLite::CppType cpp_type(FieldType type) {
  return WireFormatLite::FieldTypeToCppType(real_type(type));
}
|
60 |
+
|
61 |
+
// Returns true if fields with the given wire type may be encoded in a
// packed repeated field (varint and fixed-width scalar encodings only).
inline bool is_packable(WireFormatLite::WireType type) {
  switch (type) {
    case WireFormatLite::WIRETYPE_VARINT:
    case WireFormatLite::WIRETYPE_FIXED64:
    case WireFormatLite::WIRETYPE_FIXED32:
      return true;
    case WireFormatLite::WIRETYPE_LENGTH_DELIMITED:
    case WireFormatLite::WIRETYPE_START_GROUP:
    case WireFormatLite::WIRETYPE_END_GROUP:
      return false;

    // Do not add a default statement. Let the compiler complain when someone
    // adds a new wire type.
  }
  GOOGLE_LOG(FATAL) << "can't reach here.";
  return false;  // Unreachable; keeps compilers that require a return happy.
}
|
78 |
+
|
79 |
+
// Extension registry: maps (containing-message prototype, field number) to
// the metadata needed to parse/serialize that extension.
typedef hash_map<std::pair<const MessageLite*, int>,
                 ExtensionInfo> ExtensionRegistry;

// Global registry pointer; remains null until the first Register() call.
static const ExtensionRegistry* global_registry = nullptr;

// This function is only called at startup, so there is no need for thread-
// safety.
void Register(const MessageLite* containing_type,
              int number, ExtensionInfo info) {
  // NOTE(review): OnShutdownDelete presumably schedules deletion of the
  // registry at library shutdown — confirm against its declaration.
  static auto local_static_registry = OnShutdownDelete(new ExtensionRegistry);
  global_registry = local_static_registry;
  // Registering the same (type, number) twice is a programming error.
  if (!InsertIfNotPresent(local_static_registry,
                          std::make_pair(containing_type, number), info)) {
    GOOGLE_LOG(FATAL) << "Multiple extension registrations for type \""
               << containing_type->GetTypeName()
               << "\", field number " << number << ".";
  }
}
|
98 |
+
|
99 |
+
// Looks up the extension registered for (containing_type, number).
// Returns null when nothing has been registered yet or no entry matches.
const ExtensionInfo* FindRegisteredExtension(
    const MessageLite* containing_type, int number) {
  if (global_registry == nullptr) {
    return nullptr;
  }
  return FindOrNull(*global_registry, std::make_pair(containing_type, number));
}
|
105 |
+
|
106 |
+
} // namespace
|
107 |
+
|
108 |
+
ExtensionFinder::~ExtensionFinder() {}
|
109 |
+
|
110 |
+
bool GeneratedExtensionFinder::Find(int number, ExtensionInfo* output) {
|
111 |
+
const ExtensionInfo* extension =
|
112 |
+
FindRegisteredExtension(containing_type_, number);
|
113 |
+
if (extension == NULL) {
|
114 |
+
return false;
|
115 |
+
} else {
|
116 |
+
*output = *extension;
|
117 |
+
return true;
|
118 |
+
}
|
119 |
+
}
|
120 |
+
|
121 |
+
// Registers a scalar (non-enum, non-message) extension. Enum and message
// extensions must use the dedicated overloads below, which record the
// extra metadata (validity function / prototype) they need.
void ExtensionSet::RegisterExtension(const MessageLite* containing_type,
                                     int number, FieldType type,
                                     bool is_repeated, bool is_packed) {
  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_ENUM);
  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_MESSAGE);
  GOOGLE_CHECK_NE(type, WireFormatLite::TYPE_GROUP);
  ExtensionInfo info(type, is_repeated, is_packed);
  Register(containing_type, number, info);
}
|
130 |
+
|
131 |
+
// Adapter so an EnumValidityFunc (which takes only the enum number) can be
// stored behind the generic (func, arg) validity-check interface.
static bool CallNoArgValidityFunc(const void* arg, int number) {
  // Note: Must use C-style cast here rather than reinterpret_cast because
  // the C++ standard at one point did not allow casts between function and
  // data pointers and some compilers enforce this for C++-style casts. No
  // compiler enforces it for C-style casts since lots of C-style code has
  // relied on these kinds of casts for a long time, despite being
  // technically undefined. See:
  // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#195
  // Also note: Some compilers do not allow function pointers to be "const".
  // Which makes sense, I suppose, because it's meaningless.
  return ((EnumValidityFunc*)arg)(number);
}
|
143 |
+
|
144 |
+
// Registers an enum-typed extension together with the function used to
// decide whether a wire value is a valid member of the enum.
void ExtensionSet::RegisterEnumExtension(const MessageLite* containing_type,
                                         int number, FieldType type,
                                         bool is_repeated, bool is_packed,
                                         EnumValidityFunc* is_valid) {
  GOOGLE_CHECK_EQ(type, WireFormatLite::TYPE_ENUM);
  ExtensionInfo info(type, is_repeated, is_packed);
  // The validity function is stored behind the generic (func, arg) pair.
  info.enum_validity_check.func = CallNoArgValidityFunc;
  // See comment in CallNoArgValidityFunc() about why we use a c-style cast.
  info.enum_validity_check.arg = (void*)is_valid;
  Register(containing_type, number, info);
}
|
155 |
+
|
156 |
+
// Registers a message- or group-typed extension; |prototype| is the default
// instance recorded in the registry for this extension type.
void ExtensionSet::RegisterMessageExtension(const MessageLite* containing_type,
                                            int number, FieldType type,
                                            bool is_repeated, bool is_packed,
                                            const MessageLite* prototype) {
  GOOGLE_CHECK(type == WireFormatLite::TYPE_MESSAGE ||
        type == WireFormatLite::TYPE_GROUP);
  ExtensionInfo info(type, is_repeated, is_packed);
  info.message_prototype = prototype;
  Register(containing_type, number, info);
}
|
166 |
+
|
167 |
+
|
168 |
+
// ===================================================================
|
169 |
+
// Constructors and basic methods.
|
170 |
+
|
171 |
+
// Arena-enabled constructor. flat_capacity_ starts at zero, so the
// conditional map_ initializer always yields NULL here; the expression
// mirrors the allocation logic used when the flat array grows.
// NOTE(review): the map_ initializer reads flat_capacity_, so this relies
// on flat_capacity_ being declared before map_ — confirm in the header.
ExtensionSet::ExtensionSet(::google::protobuf::Arena* arena)
    : arena_(arena),
      flat_capacity_(0),
      flat_size_(0),
      map_{flat_capacity_ == 0 ? NULL
                               : ::google::protobuf::Arena::CreateArray<KeyValue>(
                                     arena_, flat_capacity_)} {}

// Heap (non-arena) constructor; identical except arena_ is NULL.
ExtensionSet::ExtensionSet()
    : arena_(NULL),
      flat_capacity_(0),
      flat_size_(0),
      map_{flat_capacity_ == 0 ? NULL
                               : ::google::protobuf::Arena::CreateArray<KeyValue>(
                                     arena_, flat_capacity_)} {}
|
186 |
+
|
187 |
+
ExtensionSet::~ExtensionSet() {
  // Deletes all allocated extensions.
  // When arena_ is set, nothing is freed here — arena-allocated storage is
  // reclaimed when the arena itself is destroyed.
  if (arena_ == NULL) {
    ForEach([](int /* number */, Extension& ext) { ext.Free(); });
    if (GOOGLE_PREDICT_FALSE(is_large())) {
      delete map_.large;   // "large" representation
    } else {
      delete[] map_.flat;  // flat-array representation
    }
  }
}
|
198 |
+
|
199 |
+
// Defined in extension_set_heavy.cc.
|
200 |
+
// void ExtensionSet::AppendToList(const Descriptor* containing_type,
|
201 |
+
// const DescriptorPool* pool,
|
202 |
+
// vector<const FieldDescriptor*>* output) const
|
203 |
+
|
204 |
+
bool ExtensionSet::Has(int number) const {
|
205 |
+
const Extension* ext = FindOrNull(number);
|
206 |
+
if (ext == NULL) return false;
|
207 |
+
GOOGLE_DCHECK(!ext->is_repeated);
|
208 |
+
return !ext->is_cleared;
|
209 |
+
}
|
210 |
+
|
211 |
+
// Counts the extensions that are present (i.e. not cleared).
int ExtensionSet::NumExtensions() const {
  int count = 0;
  ForEach([&count](int /* number */, const Extension& ext) {
    if (!ext.is_cleared) ++count;
  });
  return count;
}
|
220 |
+
|
221 |
+
int ExtensionSet::ExtensionSize(int number) const {
|
222 |
+
const Extension* ext = FindOrNull(number);
|
223 |
+
return ext == NULL ? 0 : ext->GetSize();
|
224 |
+
}
|
225 |
+
|
226 |
+
// Returns the declared FieldType of the extension with the given number.
// Logs DFATAL and returns 0 when the extension is absent; also DFATALs
// (but still returns the type) when it is present but cleared.
FieldType ExtensionSet::ExtensionType(int number) const {
  const Extension* ext = FindOrNull(number);
  if (ext == NULL) {
    GOOGLE_LOG(DFATAL) << "Don't lookup extension types if they aren't present (1). ";
    return 0;
  }
  if (ext->is_cleared) {
    GOOGLE_LOG(DFATAL) << "Don't lookup extension types if they aren't present (2). ";
  }
  return ext->type;
}
|
237 |
+
|
238 |
+
void ExtensionSet::ClearExtension(int number) {
|
239 |
+
Extension* ext = FindOrNull(number);
|
240 |
+
if (ext == NULL) return;
|
241 |
+
ext->Clear();
|
242 |
+
}
|
243 |
+
|
244 |
+
// ===================================================================
|
245 |
+
// Field accessors
|
246 |
+
|
247 |
+
namespace {
|
248 |
+
|
249 |
+
// Field cardinality, used by GOOGLE_DCHECK_TYPE (below) to verify that an
// accessor is being used on a field of the matching label.
enum Cardinality {
  REPEATED,
  OPTIONAL
};
|
253 |
+
|
254 |
+
} // namespace
|
255 |
+
|
256 |
+
// Debug-checks that EXTENSION has the expected cardinality (LABEL) and the
// expected C++ type category (CPPTYPE_##CPPTYPE).
#define GOOGLE_DCHECK_TYPE(EXTENSION, LABEL, CPPTYPE) \
  GOOGLE_DCHECK_EQ((EXTENSION).is_repeated ? REPEATED : OPTIONAL, LABEL); \
  GOOGLE_DCHECK_EQ(cpp_type((EXTENSION).type), WireFormatLite::CPPTYPE_##CPPTYPE)
|
259 |
+
|
260 |
+
// -------------------------------------------------------------------
|
261 |
+
// Primitives
|
262 |
+
|
263 |
+
// Stamps out the five accessors (Get / Set / GetRepeated / SetRepeated /
// Add) for one primitive type. UPPERCASE is the WireFormatLite CPPTYPE_*
// suffix, LOWERCASE the C++ type name and union-member prefix, CAMELCASE
// the method-name fragment. Absent singular fields return the caller's
// default; repeated accessors GOOGLE_CHECK that the field exists.
#define PRIMITIVE_ACCESSORS(UPPERCASE, LOWERCASE, CAMELCASE)                  \
                                                                              \
  LOWERCASE ExtensionSet::Get##CAMELCASE(int number,                          \
                                         LOWERCASE default_value) const {     \
    const Extension* extension = FindOrNull(number);                          \
    if (extension == NULL || extension->is_cleared) {                         \
      return default_value;                                                   \
    } else {                                                                  \
      GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, UPPERCASE);                    \
      return extension->LOWERCASE##_value;                                    \
    }                                                                         \
  }                                                                           \
                                                                              \
  void ExtensionSet::Set##CAMELCASE(int number, FieldType type,               \
                                    LOWERCASE value,                          \
                                    const FieldDescriptor* descriptor) {      \
    Extension* extension;                                                     \
    if (MaybeNewExtension(number, descriptor, &extension)) {                  \
      extension->type = type;                                                 \
      GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_##UPPERCASE); \
      extension->is_repeated = false;                                         \
    } else {                                                                  \
      GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, UPPERCASE);                    \
    }                                                                         \
    extension->is_cleared = false;                                            \
    extension->LOWERCASE##_value = value;                                     \
  }                                                                           \
                                                                              \
  LOWERCASE ExtensionSet::GetRepeated##CAMELCASE(int number, int index) const { \
    const Extension* extension = FindOrNull(number);                          \
    GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; \
    GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE);                      \
    return extension->repeated_##LOWERCASE##_value->Get(index);               \
  }                                                                           \
                                                                              \
  void ExtensionSet::SetRepeated##CAMELCASE(                                  \
      int number, int index, LOWERCASE value) {                               \
    Extension* extension = FindOrNull(number);                                \
    GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty)."; \
    GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE);                      \
    extension->repeated_##LOWERCASE##_value->Set(index, value);               \
  }                                                                           \
                                                                              \
  void ExtensionSet::Add##CAMELCASE(int number, FieldType type,               \
                                    bool packed, LOWERCASE value,             \
                                    const FieldDescriptor* descriptor) {      \
    Extension* extension;                                                     \
    if (MaybeNewExtension(number, descriptor, &extension)) {                  \
      extension->type = type;                                                 \
      GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_##UPPERCASE); \
      extension->is_repeated = true;                                          \
      extension->is_packed = packed;                                          \
      extension->repeated_##LOWERCASE##_value =                               \
          Arena::CreateMessage<RepeatedField<LOWERCASE> >(arena_);            \
    } else {                                                                  \
      GOOGLE_DCHECK_TYPE(*extension, REPEATED, UPPERCASE);                    \
      GOOGLE_DCHECK_EQ(extension->is_packed, packed);                         \
    }                                                                         \
    extension->repeated_##LOWERCASE##_value->Add(value);                      \
  }

// One instantiation per primitive scalar type.
PRIMITIVE_ACCESSORS( INT32,  int32,  Int32)
PRIMITIVE_ACCESSORS( INT64,  int64,  Int64)
PRIMITIVE_ACCESSORS(UINT32, uint32, UInt32)
PRIMITIVE_ACCESSORS(UINT64, uint64, UInt64)
PRIMITIVE_ACCESSORS( FLOAT,  float,  Float)
PRIMITIVE_ACCESSORS(DOUBLE, double, Double)
PRIMITIVE_ACCESSORS(  BOOL,   bool,   Bool)

#undef PRIMITIVE_ACCESSORS
|
333 |
+
|
334 |
+
const void* ExtensionSet::GetRawRepeatedField(int number,
|
335 |
+
const void* default_value) const {
|
336 |
+
const Extension* extension = FindOrNull(number);
|
337 |
+
if (extension == NULL) {
|
338 |
+
return default_value;
|
339 |
+
}
|
340 |
+
// We assume that all the RepeatedField<>* pointers have the same
|
341 |
+
// size and alignment within the anonymous union in Extension.
|
342 |
+
return extension->repeated_int32_value;
|
343 |
+
}
|
344 |
+
|
345 |
+
// Returns a type-erased pointer to the repeated field for |number|,
// creating an empty Repeated{,Ptr}Field of the appropriate element type
// (allocated on arena_) if the extension does not exist yet.
void* ExtensionSet::MutableRawRepeatedField(int number, FieldType field_type,
                                            bool packed,
                                            const FieldDescriptor* desc) {
  Extension* extension;

  // We instantiate an empty Repeated{,Ptr}Field if one doesn't exist for this
  // extension.
  if (MaybeNewExtension(number, desc, &extension)) {
    extension->is_repeated = true;
    extension->type = field_type;
    extension->is_packed = packed;

    // Pick the concrete container type from the field's C++ type category.
    switch (WireFormatLite::FieldTypeToCppType(
        static_cast<WireFormatLite::FieldType>(field_type))) {
      case WireFormatLite::CPPTYPE_INT32:
        extension->repeated_int32_value =
            Arena::CreateMessage<RepeatedField<int32> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_INT64:
        extension->repeated_int64_value =
            Arena::CreateMessage<RepeatedField<int64> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_UINT32:
        extension->repeated_uint32_value =
            Arena::CreateMessage<RepeatedField<uint32> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_UINT64:
        extension->repeated_uint64_value =
            Arena::CreateMessage<RepeatedField<uint64> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_DOUBLE:
        extension->repeated_double_value =
            Arena::CreateMessage<RepeatedField<double> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_FLOAT:
        extension->repeated_float_value =
            Arena::CreateMessage<RepeatedField<float> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_BOOL:
        extension->repeated_bool_value =
            Arena::CreateMessage<RepeatedField<bool> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_ENUM:
        extension->repeated_enum_value =
            Arena::CreateMessage<RepeatedField<int> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_STRING:
        extension->repeated_string_value =
            Arena::CreateMessage<RepeatedPtrField<::std::string> >(arena_);
        break;
      case WireFormatLite::CPPTYPE_MESSAGE:
        extension->repeated_message_value =
            Arena::CreateMessage<RepeatedPtrField<MessageLite> >(arena_);
        break;
    }
  }

  // We assume that all the RepeatedField<>* pointers have the same
  // size and alignment within the anonymous union in Extension.
  return extension->repeated_int32_value;
}
|
406 |
+
|
407 |
+
// Compatible version using old call signature. Does not create extensions when
|
408 |
+
// the don't already exist; instead, just GOOGLE_CHECK-fails.
|
409 |
+
// Compatible version using old call signature. Does not create extensions
// when they don't already exist; instead, just GOOGLE_CHECK-fails.
void* ExtensionSet::MutableRawRepeatedField(int number) {
  Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Extension not found.";
  // We assume that all the RepeatedField<>* pointers have the same
  // size and alignment within the anonymous union in Extension.
  return extension->repeated_int32_value;
}
|
416 |
+
|
417 |
+
|
418 |
+
// -------------------------------------------------------------------
|
419 |
+
// Enums
|
420 |
+
|
421 |
+
int ExtensionSet::GetEnum(int number, int default_value) const {
|
422 |
+
const Extension* extension = FindOrNull(number);
|
423 |
+
if (extension == NULL || extension->is_cleared) {
|
424 |
+
// Not present. Return the default value.
|
425 |
+
return default_value;
|
426 |
+
} else {
|
427 |
+
GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, ENUM);
|
428 |
+
return extension->enum_value;
|
429 |
+
}
|
430 |
+
}
|
431 |
+
|
432 |
+
// Sets a singular enum extension, creating the Extension entry if needed,
// and marks it present (is_cleared = false).
void ExtensionSet::SetEnum(int number, FieldType type, int value,
                           const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    // Freshly created: record the declared type.
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_ENUM);
    extension->is_repeated = false;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, ENUM);
  }
  extension->is_cleared = false;
  extension->enum_value = value;
}
|
445 |
+
|
446 |
+
int ExtensionSet::GetRepeatedEnum(int number, int index) const {
|
447 |
+
const Extension* extension = FindOrNull(number);
|
448 |
+
GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
|
449 |
+
GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM);
|
450 |
+
return extension->repeated_enum_value->Get(index);
|
451 |
+
}
|
452 |
+
|
453 |
+
void ExtensionSet::SetRepeatedEnum(int number, int index, int value) {
|
454 |
+
Extension* extension = FindOrNull(number);
|
455 |
+
GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
|
456 |
+
GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM);
|
457 |
+
extension->repeated_enum_value->Set(index, value);
|
458 |
+
}
|
459 |
+
|
460 |
+
// Appends |value| to a repeated enum extension, creating the underlying
// RepeatedField<int> (on arena_) on first use. DCHECKs that the packedness
// of subsequent adds matches the first one.
void ExtensionSet::AddEnum(int number, FieldType type,
                           bool packed, int value,
                           const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_ENUM);
    extension->is_repeated = true;
    extension->is_packed = packed;
    extension->repeated_enum_value =
        Arena::CreateMessage<RepeatedField<int> >(arena_);
  } else {
    GOOGLE_DCHECK_TYPE(*extension, REPEATED, ENUM);
    GOOGLE_DCHECK_EQ(extension->is_packed, packed);
  }
  extension->repeated_enum_value->Add(value);
}
|
477 |
+
|
478 |
+
// -------------------------------------------------------------------
|
479 |
+
// Strings
|
480 |
+
|
481 |
+
// Returns the singular string extension's value, or |default_value| when
// the extension is absent or cleared.
const string& ExtensionSet::GetString(int number,
                                      const string& default_value) const {
  const Extension* extension = FindOrNull(number);
  if (extension == NULL || extension->is_cleared) {
    // Not present.  Return the default value.
    return default_value;
  }
  GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, STRING);
  return *extension->string_value;
}
|
492 |
+
|
493 |
+
// Returns a mutable pointer to a singular string extension, creating the
// string (on arena_) the first time the extension is touched. Always marks
// the field present (is_cleared = false).
string* ExtensionSet::MutableString(int number, FieldType type,
                                    const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_STRING);
    extension->is_repeated = false;
    extension->string_value = Arena::Create<string>(arena_);
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, STRING);
  }
  extension->is_cleared = false;
  return extension->string_value;
}
|
507 |
+
|
508 |
+
// Returns element |index| of a repeated string extension.
const string& ExtensionSet::GetRepeatedString(int number, int index) const {
  const Extension* ext = FindOrNull(number);
  GOOGLE_CHECK(ext != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK_TYPE(*ext, REPEATED, STRING);
  return ext->repeated_string_value->Get(index);
}
|
514 |
+
|
515 |
+
// Returns a mutable pointer to element |index| of a repeated string
// extension.
string* ExtensionSet::MutableRepeatedString(int number, int index) {
  Extension* ext = FindOrNull(number);
  GOOGLE_CHECK(ext != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK_TYPE(*ext, REPEATED, STRING);
  return ext->repeated_string_value->Mutable(index);
}
|
521 |
+
|
522 |
+
// Appends and returns a new, empty string in a repeated string extension,
// creating the RepeatedPtrField (on arena_) on first use.
string* ExtensionSet::AddString(int number, FieldType type,
                                const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_STRING);
    extension->is_repeated = true;
    extension->is_packed = false;  // Strings are never packed.
    extension->repeated_string_value =
        Arena::CreateMessage<RepeatedPtrField<string> >(arena_);
  } else {
    GOOGLE_DCHECK_TYPE(*extension, REPEATED, STRING);
  }
  return extension->repeated_string_value->Add();
}
|
537 |
+
|
538 |
+
// -------------------------------------------------------------------
|
539 |
+
// Messages
|
540 |
+
|
541 |
+
// Returns the singular message extension, or |default_value| when absent.
const MessageLite& ExtensionSet::GetMessage(
    int number, const MessageLite& default_value) const {
  const Extension* extension = FindOrNull(number);
  if (extension == NULL) {
    // Not present.  Return the default value.
    return default_value;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    if (extension->is_lazy) {
      // Lazily-stored message: delegate retrieval to the lazy wrapper.
      return extension->lazymessage_value->GetMessage(default_value);
    } else {
      return *extension->message_value;
    }
  }
}
|
556 |
+
|
557 |
+
// Defined in extension_set_heavy.cc.
|
558 |
+
// const MessageLite& ExtensionSet::GetMessage(int number,
|
559 |
+
// const Descriptor* message_type,
|
560 |
+
// MessageFactory* factory) const
|
561 |
+
|
562 |
+
// Returns a mutable pointer to a singular message extension, creating a
// fresh message via prototype.New(arena_) when the extension is absent.
// Always marks the field present (is_cleared = false).
MessageLite* ExtensionSet::MutableMessage(int number, FieldType type,
                                          const MessageLite& prototype,
                                          const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE);
    extension->is_repeated = false;
    extension->is_lazy = false;
    extension->message_value = prototype.New(arena_);
    extension->is_cleared = false;
    return extension->message_value;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    extension->is_cleared = false;
    if (extension->is_lazy) {
      // Lazily-stored message: delegate mutation to the lazy wrapper.
      return extension->lazymessage_value->MutableMessage(prototype);
    } else {
      return extension->message_value;
    }
  }
}
|
584 |
+
|
585 |
+
// Defined in extension_set_heavy.cc.
|
586 |
+
// MessageLite* ExtensionSet::MutableMessage(int number, FieldType type,
|
587 |
+
// const Descriptor* message_type,
|
588 |
+
// MessageFactory* factory)
|
589 |
+
|
590 |
+
// Takes ownership of `message` and installs it as the singular message
// extension `number`.  NULL clears the extension.  Ownership rules:
//  - same arena as this set: store the pointer directly;
//  - message on heap, set on arena: store the pointer and have arena_ Own()
//    it so it is deleted when the arena is destroyed;
//  - different arenas (or heap set vs. arena message): deep-copy into arena_,
//    leaving the caller's message untouched.
// NOTE(review): in the cross-arena copy branches the original `message` is
// not deleted here — presumably its own arena keeps ownership; confirm
// against callers before changing.
void ExtensionSet::SetAllocatedMessage(int number, FieldType type,
                                       const FieldDescriptor* descriptor,
                                       MessageLite* message) {
  if (message == NULL) {
    ClearExtension(number);
    return;
  }
  ::google::protobuf::Arena* message_arena = message->GetArena();
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE);
    extension->is_repeated = false;
    extension->is_lazy = false;
    if (message_arena == arena_) {
      extension->message_value = message;
    } else if (message_arena == NULL) {
      extension->message_value = message;
      arena_->Own(message);  // not NULL because not equal to message_arena
    } else {
      extension->message_value = message->New(arena_);
      extension->message_value->CheckTypeAndMergeFrom(*message);
    }
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    if (extension->is_lazy) {
      extension->lazymessage_value->SetAllocatedMessage(message);
    } else {
      // Heap-allocated previous value must be freed; arena-allocated values
      // are reclaimed with the arena.
      if (arena_ == NULL) {
        delete extension->message_value;
      }
      if (message_arena == arena_) {
        extension->message_value = message;
      } else if (message_arena == NULL) {
        extension->message_value = message;
        arena_->Own(message);  // not NULL because not equal to message_arena
      } else {
        extension->message_value = message->New(arena_);
        extension->message_value->CheckTypeAndMergeFrom(*message);
      }
    }
  }
  extension->is_cleared = false;
}
|
634 |
+
|
635 |
+
// Like SetAllocatedMessage() but stores `message` directly with no arena
// ownership bookkeeping — "unsafe" because the caller must guarantee that
// `message` outlives this set (e.g. same arena, or caller-managed lifetime).
void ExtensionSet::UnsafeArenaSetAllocatedMessage(
    int number, FieldType type, const FieldDescriptor* descriptor,
    MessageLite* message) {
  if (message == NULL) {
    ClearExtension(number);
    return;
  }
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE);
    extension->is_repeated = false;
    extension->is_lazy = false;
    extension->message_value = message;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    if (extension->is_lazy) {
      extension->lazymessage_value->UnsafeArenaSetAllocatedMessage(message);
    } else {
      // Only a heap-allocated previous value needs explicit deletion.
      if (arena_ == NULL) {
        delete extension->message_value;
      }
      extension->message_value = message;
    }
  }
  extension->is_cleared = false;
}
|
662 |
+
|
663 |
+
// Removes the singular message extension `number` from the set and returns it
// as a heap-allocated message the caller owns, or NULL if not present.  When
// the set lives on an arena the stored message cannot be handed out directly,
// so a heap copy is returned instead.
MessageLite* ExtensionSet::ReleaseMessage(int number,
                                          const MessageLite& prototype) {
  Extension* extension = FindOrNull(number);
  if (extension == NULL) {
    // Not present.  Return NULL.
    return NULL;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    MessageLite* ret = NULL;
    if (extension->is_lazy) {
      ret = extension->lazymessage_value->ReleaseMessage(prototype);
      if (arena_ == NULL) {
        delete extension->lazymessage_value;
      }
    } else {
      if (arena_ == NULL) {
        ret = extension->message_value;
      } else {
        // ReleaseMessage() always returns a heap-allocated message, and we are
        // on an arena, so we need to make a copy of this message to return.
        ret = extension->message_value->New();
        ret->CheckTypeAndMergeFrom(*extension->message_value);
      }
    }
    Erase(number);
    return ret;
  }
}
|
691 |
+
|
692 |
+
// Like ReleaseMessage() but returns the stored pointer as-is — "unsafe"
// because on an arena the returned message is still arena-owned and must not
// be deleted by the caller.
MessageLite* ExtensionSet::UnsafeArenaReleaseMessage(
    int number, const MessageLite& prototype) {
  Extension* extension = FindOrNull(number);
  if (extension == NULL) {
    // Not present.  Return NULL.
    return NULL;
  } else {
    GOOGLE_DCHECK_TYPE(*extension, OPTIONAL, MESSAGE);
    MessageLite* ret = NULL;
    if (extension->is_lazy) {
      ret = extension->lazymessage_value->UnsafeArenaReleaseMessage(prototype);
      if (arena_ == NULL) {
        delete extension->lazymessage_value;
      }
    } else {
      ret = extension->message_value;
    }
    Erase(number);
    return ret;
  }
}
|
713 |
+
|
714 |
+
// Defined in extension_set_heavy.cc.
|
715 |
+
// MessageLite* ExtensionSet::ReleaseMessage(const FieldDescriptor* descriptor,
|
716 |
+
// MessageFactory* factory);
|
717 |
+
|
718 |
+
// Returns element `index` of the repeated message extension `number`.
// Crashes (CHECK) if the extension is absent; index bounds are checked by
// the underlying RepeatedPtrField.
const MessageLite& ExtensionSet::GetRepeatedMessage(
    int number, int index) const {
  const Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
  return extension->repeated_message_value->Get(index);
}
|
725 |
+
|
726 |
+
// Mutable counterpart of GetRepeatedMessage(): returns a pointer to element
// `index` of the repeated message extension `number`.
MessageLite* ExtensionSet::MutableRepeatedMessage(int number, int index) {
  Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
  return extension->repeated_message_value->Mutable(index);
}
|
732 |
+
|
733 |
+
// Appends a new message element to the repeated message extension `number`
// and returns it.  Reuses a previously-cleared element when one is cached in
// the RepeatedPtrField; otherwise allocates one from `prototype` on arena_.
MessageLite* ExtensionSet::AddMessage(int number, FieldType type,
                                      const MessageLite& prototype,
                                      const FieldDescriptor* descriptor) {
  Extension* extension;
  if (MaybeNewExtension(number, descriptor, &extension)) {
    extension->type = type;
    GOOGLE_DCHECK_EQ(cpp_type(extension->type), WireFormatLite::CPPTYPE_MESSAGE);
    extension->is_repeated = true;
    extension->repeated_message_value =
        Arena::CreateMessage<RepeatedPtrField<MessageLite> >(arena_);
  } else {
    GOOGLE_DCHECK_TYPE(*extension, REPEATED, MESSAGE);
  }

  // RepeatedPtrField<MessageLite> does not know how to Add() since it cannot
  // allocate an abstract object, so we have to be tricky.
  MessageLite* result =
      reinterpret_cast<::google::protobuf::internal::RepeatedPtrFieldBase*>(
          extension->repeated_message_value)
          ->AddFromCleared<GenericTypeHandler<MessageLite> >();
  if (result == NULL) {
    // No cleared element available; allocate a fresh one from the prototype.
    result = prototype.New(arena_);
    extension->repeated_message_value->AddAllocated(result);
  }
  return result;
}
|
759 |
+
|
760 |
+
// Defined in extension_set_heavy.cc.
|
761 |
+
// MessageLite* ExtensionSet::AddMessage(int number, FieldType type,
|
762 |
+
// const Descriptor* message_type,
|
763 |
+
// MessageFactory* factory)
|
764 |
+
|
765 |
+
#undef GOOGLE_DCHECK_TYPE
|
766 |
+
|
767 |
+
// Removes the last element of the repeated extension `number`, dispatching on
// the stored C++ type to pick the right union member.  CHECK-fails if the
// extension is absent.
void ExtensionSet::RemoveLast(int number) {
  Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK(extension->is_repeated);

  switch(cpp_type(extension->type)) {
    case WireFormatLite::CPPTYPE_INT32:
      extension->repeated_int32_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_INT64:
      extension->repeated_int64_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_UINT32:
      extension->repeated_uint32_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_UINT64:
      extension->repeated_uint64_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_FLOAT:
      extension->repeated_float_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_DOUBLE:
      extension->repeated_double_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_BOOL:
      extension->repeated_bool_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_ENUM:
      extension->repeated_enum_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_STRING:
      extension->repeated_string_value->RemoveLast();
      break;
    case WireFormatLite::CPPTYPE_MESSAGE:
      extension->repeated_message_value->RemoveLast();
      break;
  }
}
|
805 |
+
|
806 |
+
// Removes and returns the last element of the repeated *message* extension
// `number`, transferring ownership to the caller.  Only valid for message
// fields (DCHECKed).
MessageLite* ExtensionSet::ReleaseLast(int number) {
  Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK(extension->is_repeated);
  GOOGLE_DCHECK(cpp_type(extension->type) == WireFormatLite::CPPTYPE_MESSAGE);
  return extension->repeated_message_value->ReleaseLast();
}
|
813 |
+
|
814 |
+
// Swaps elements `index1` and `index2` of the repeated extension `number`,
// dispatching on the stored C++ type to pick the right union member.
void ExtensionSet::SwapElements(int number, int index1, int index2) {
  Extension* extension = FindOrNull(number);
  GOOGLE_CHECK(extension != NULL) << "Index out-of-bounds (field is empty).";
  GOOGLE_DCHECK(extension->is_repeated);

  switch(cpp_type(extension->type)) {
    case WireFormatLite::CPPTYPE_INT32:
      extension->repeated_int32_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_INT64:
      extension->repeated_int64_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_UINT32:
      extension->repeated_uint32_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_UINT64:
      extension->repeated_uint64_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_FLOAT:
      extension->repeated_float_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_DOUBLE:
      extension->repeated_double_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_BOOL:
      extension->repeated_bool_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_ENUM:
      extension->repeated_enum_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_STRING:
      extension->repeated_string_value->SwapElements(index1, index2);
      break;
    case WireFormatLite::CPPTYPE_MESSAGE:
      extension->repeated_message_value->SwapElements(index1, index2);
      break;
  }
}
|
852 |
+
|
853 |
+
// ===================================================================
|
854 |
+
|
855 |
+
// Clears every extension's value; entries stay allocated (is_cleared) so
// storage can be reused.
void ExtensionSet::Clear() {
  ForEach([](int /* number */, Extension& ext) { ext.Clear(); });
}
|
858 |
+
|
859 |
+
namespace {
// Computes the size of a std::set_union without constructing the union.
// Both input ranges must be sorted by `.first`; elements with equal keys are
// counted once.
template <typename ItX, typename ItY>
size_t SizeOfUnion(ItX it_xs, ItX end_xs, ItY it_ys, ItY end_ys) {
  size_t count = 0;
  // Walk both sorted ranges in lockstep, counting one output element per step.
  for (; it_xs != end_xs && it_ys != end_ys; ++count) {
    if (it_xs->first == it_ys->first) {
      ++it_xs;
      ++it_ys;
    } else if (it_xs->first < it_ys->first) {
      ++it_xs;
    } else {
      ++it_ys;
    }
  }
  // Whatever remains in either range is disjoint from the other.
  count += std::distance(it_xs, end_xs);
  count += std::distance(it_ys, end_ys);
  return count;
}
}  // namespace
|
880 |
+
|
881 |
+
// Merges all extensions from `other` into this set.  When this set still uses
// the flat (small) representation, capacity is grown up-front to the size of
// the union so the per-extension merge does not reallocate repeatedly.
void ExtensionSet::MergeFrom(const ExtensionSet& other) {
  if (GOOGLE_PREDICT_TRUE(!is_large())) {
    if (GOOGLE_PREDICT_TRUE(!other.is_large())) {
      GrowCapacity(SizeOfUnion(flat_begin(), flat_end(), other.flat_begin(),
                               other.flat_end()));
    } else {
      GrowCapacity(SizeOfUnion(flat_begin(), flat_end(),
                               other.map_.large->begin(),
                               other.map_.large->end()));
    }
  }
  other.ForEach([this](int number, const Extension& ext) {
    this->InternalExtensionMergeFrom(number, ext);
  });
}
|
896 |
+
|
897 |
+
// Merges one extension value from another set into this one, copying into
// arena_ as needed.  Repeated fields are appended element-by-element;
// singular scalar/string fields are overwritten via the Set* helpers; and
// singular messages are merged recursively, handling every combination of
// lazy/eager representation on either side.
void ExtensionSet::InternalExtensionMergeFrom(
    int number, const Extension& other_extension) {
  if (other_extension.is_repeated) {
    Extension* extension;
    bool is_new = MaybeNewExtension(number, other_extension.descriptor,
                                    &extension);
    if (is_new) {
      // Extension did not already exist in set.
      extension->type = other_extension.type;
      extension->is_packed = other_extension.is_packed;
      extension->is_repeated = true;
    } else {
      GOOGLE_DCHECK_EQ(extension->type, other_extension.type);
      GOOGLE_DCHECK_EQ(extension->is_packed, other_extension.is_packed);
      GOOGLE_DCHECK(extension->is_repeated);
    }

    switch (cpp_type(other_extension.type)) {
      // For each scalar/string kind: lazily create our repeated field on
      // arena_, then append the other side's elements via MergeFrom().
#define HANDLE_TYPE(UPPERCASE, LOWERCASE, REPEATED_TYPE)             \
      case WireFormatLite::CPPTYPE_##UPPERCASE:                      \
        if (is_new) {                                                \
          extension->repeated_##LOWERCASE##_value =                  \
              Arena::CreateMessage<REPEATED_TYPE >(arena_);          \
        }                                                            \
        extension->repeated_##LOWERCASE##_value->MergeFrom(          \
            *other_extension.repeated_##LOWERCASE##_value);          \
        break;

      HANDLE_TYPE(  INT32,  int32, RepeatedField   <  int32>);
      HANDLE_TYPE(  INT64,  int64, RepeatedField   <  int64>);
      HANDLE_TYPE( UINT32, uint32, RepeatedField   < uint32>);
      HANDLE_TYPE( UINT64, uint64, RepeatedField   < uint64>);
      HANDLE_TYPE(  FLOAT,  float, RepeatedField   <  float>);
      HANDLE_TYPE( DOUBLE, double, RepeatedField   < double>);
      HANDLE_TYPE(   BOOL,   bool, RepeatedField   <   bool>);
      HANDLE_TYPE(   ENUM,   enum, RepeatedField   <    int>);
      HANDLE_TYPE( STRING, string, RepeatedPtrField< string>);
#undef HANDLE_TYPE

      case WireFormatLite::CPPTYPE_MESSAGE:
        if (is_new) {
          extension->repeated_message_value =
              Arena::CreateMessage<RepeatedPtrField<MessageLite> >(arena_);
        }
        // We can't call RepeatedPtrField<MessageLite>::MergeFrom() because
        // it would attempt to allocate new objects.
        RepeatedPtrField<MessageLite>* other_repeated_message =
            other_extension.repeated_message_value;
        for (int i = 0; i < other_repeated_message->size(); i++) {
          const MessageLite& other_message = other_repeated_message->Get(i);
          // Reuse a cleared element when available; otherwise allocate one
          // from the other message's prototype on arena_.
          MessageLite* target =
              reinterpret_cast<::google::protobuf::internal::RepeatedPtrFieldBase*>(
                  extension->repeated_message_value)
                  ->AddFromCleared<GenericTypeHandler<MessageLite> >();
          if (target == NULL) {
            target = other_message.New(arena_);
            extension->repeated_message_value->AddAllocated(target);
          }
          target->CheckTypeAndMergeFrom(other_message);
        }
        break;
    }
  } else {
    if (!other_extension.is_cleared) {
      switch (cpp_type(other_extension.type)) {
        // Singular scalars: simply overwrite via the typed setter.
#define HANDLE_TYPE(UPPERCASE, LOWERCASE, CAMELCASE)                 \
        case WireFormatLite::CPPTYPE_##UPPERCASE:                    \
          Set##CAMELCASE(number, other_extension.type,               \
                         other_extension.LOWERCASE##_value,          \
                         other_extension.descriptor);                \
          break;

        HANDLE_TYPE( INT32,  int32,  Int32);
        HANDLE_TYPE( INT64,  int64,  Int64);
        HANDLE_TYPE(UINT32, uint32, UInt32);
        HANDLE_TYPE(UINT64, uint64, UInt64);
        HANDLE_TYPE( FLOAT,  float,  Float);
        HANDLE_TYPE(DOUBLE, double, Double);
        HANDLE_TYPE(  BOOL,   bool,   Bool);
        HANDLE_TYPE(  ENUM,   enum,   Enum);
#undef HANDLE_TYPE
        case WireFormatLite::CPPTYPE_STRING:
          SetString(number, other_extension.type,
                    *other_extension.string_value,
                    other_extension.descriptor);
          break;
        case WireFormatLite::CPPTYPE_MESSAGE: {
          Extension* extension;
          bool is_new = MaybeNewExtension(number,
                                          other_extension.descriptor,
                                          &extension);
          if (is_new) {
            // Fresh entry: deep-copy the other side's message (lazy or
            // eager) into arena_.
            extension->type = other_extension.type;
            extension->is_packed = other_extension.is_packed;
            extension->is_repeated = false;
            if (other_extension.is_lazy) {
              extension->is_lazy = true;
              extension->lazymessage_value =
                  other_extension.lazymessage_value->New(arena_);
              extension->lazymessage_value->MergeFrom(
                  *other_extension.lazymessage_value);
            } else {
              extension->is_lazy = false;
              extension->message_value =
                  other_extension.message_value->New(arena_);
              extension->message_value->CheckTypeAndMergeFrom(
                  *other_extension.message_value);
            }
          } else {
            GOOGLE_DCHECK_EQ(extension->type, other_extension.type);
            GOOGLE_DCHECK_EQ(extension->is_packed,other_extension.is_packed);
            GOOGLE_DCHECK(!extension->is_repeated);
            // Existing entry: merge, handling lazy/eager on both sides.
            if (other_extension.is_lazy) {
              if (extension->is_lazy) {
                extension->lazymessage_value->MergeFrom(
                    *other_extension.lazymessage_value);
              } else {
                extension->message_value->CheckTypeAndMergeFrom(
                    other_extension.lazymessage_value->GetMessage(
                        *extension->message_value));
              }
            } else {
              if (extension->is_lazy) {
                extension->lazymessage_value->MutableMessage(
                    *other_extension.message_value)->CheckTypeAndMergeFrom(
                        *other_extension.message_value);
              } else {
                extension->message_value->CheckTypeAndMergeFrom(
                    *other_extension.message_value);
              }
            }
          }
          extension->is_cleared = false;
          break;
        }
      }
    }
  }
}
|
1036 |
+
|
1037 |
+
// Swaps the full contents of this set with `x`.  Same arena: O(1) pointer
// swap of the internal storage.  Different arenas: contents cannot be
// exchanged by pointer, so swap by copying through a temporary set.
void ExtensionSet::Swap(ExtensionSet* x) {
  if (GetArenaNoVirtual() == x->GetArenaNoVirtual()) {
    using std::swap;
    swap(flat_capacity_, x->flat_capacity_);
    swap(flat_size_, x->flat_size_);
    swap(map_, x->map_);
  } else {
    // TODO(cfallin, rohananil): We maybe able to optimize a case where we are
    // swapping from heap to arena-allocated extension set, by just Own()'ing
    // the extensions.
    ExtensionSet extension_set;
    extension_set.MergeFrom(*x);
    x->Clear();
    x->MergeFrom(*this);
    Clear();
    MergeFrom(extension_set);
  }
}
|
1055 |
+
|
1056 |
+
// Swaps a single extension `number` between this set and `other`, covering
// all four presence combinations.  Cross-arena swaps go through temporary
// copies (reusing InternalExtensionMergeFrom's copy-across-arenas logic).
void ExtensionSet::SwapExtension(ExtensionSet* other,
                                 int number) {
  if (this == other) return;
  Extension* this_ext = FindOrNull(number);
  Extension* other_ext = other->FindOrNull(number);

  // Present in neither set: nothing to do.
  if (this_ext == NULL && other_ext == NULL) {
    return;
  }

  // Present in both sets.
  if (this_ext != NULL && other_ext != NULL) {
    if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
      using std::swap;
      swap(*this_ext, *other_ext);
    } else {
      // TODO(cfallin, rohananil): We could further optimize these cases,
      // especially avoid creation of ExtensionSet, and move MergeFrom logic
      // into Extensions itself (which takes arena as an argument).
      // We do it this way to reuse the copy-across-arenas logic already
      // implemented in ExtensionSet's MergeFrom.
      ExtensionSet temp;
      temp.InternalExtensionMergeFrom(number, *other_ext);
      Extension* temp_ext = temp.FindOrNull(number);
      other_ext->Clear();
      other->InternalExtensionMergeFrom(number, *this_ext);
      this_ext->Clear();
      InternalExtensionMergeFrom(number, *temp_ext);
    }
    return;
  }

  // Present only in `other`: move it here.
  if (this_ext == NULL) {
    if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
      *Insert(number).first = *other_ext;
    } else {
      InternalExtensionMergeFrom(number, *other_ext);
    }
    other->Erase(number);
    return;
  }

  // Present only here: move it to `other`.
  if (other_ext == NULL) {
    if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
      *other->Insert(number).first = *this_ext;
    } else {
      other->InternalExtensionMergeFrom(number, *this_ext);
    }
    Erase(number);
    return;
  }
}
|
1107 |
+
|
1108 |
+
// Returns true if every extension value is fully initialized.
bool ExtensionSet::IsInitialized() const {
  // Extensions are never required.  However, we need to check that all
  // embedded messages are initialized.
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    // Large representation: iterate the std::map.
    for (const auto& kv : *map_.large) {
      if (!kv.second.IsInitialized()) return false;
    }
    return true;
  }
  // Small representation: iterate the flat array.
  for (const KeyValue* it = flat_begin(); it != flat_end(); ++it) {
    if (!it->second.IsInitialized()) return false;
  }
  return true;
}
|
1122 |
+
|
1123 |
+
// Splits a wire-format tag into field number and wire type, then delegates to
// FindExtensionInfoFromFieldNumber().  `*field_number` is always set; the
// other outputs are only meaningful when true is returned.
bool ExtensionSet::FindExtensionInfoFromTag(
    uint32 tag, ExtensionFinder* extension_finder, int* field_number,
    ExtensionInfo* extension, bool* was_packed_on_wire) {
  *field_number = WireFormatLite::GetTagFieldNumber(tag);
  WireFormatLite::WireType wire_type = WireFormatLite::GetTagWireType(tag);
  return FindExtensionInfoFromFieldNumber(wire_type, *field_number,
                                          extension_finder, extension,
                                          was_packed_on_wire);
}
|
1132 |
+
|
1133 |
+
// Looks up extension metadata for `field_number` and validates the wire type.
// Returns false if the field is unknown or the wire type does not match.
// Sets `*was_packed_on_wire` when a packable repeated field arrived in
// length-delimited (packed) form, which is accepted regardless of the
// declared encoding.
bool ExtensionSet::FindExtensionInfoFromFieldNumber(
    int wire_type, int field_number, ExtensionFinder* extension_finder,
    ExtensionInfo* extension, bool* was_packed_on_wire) {
  if (!extension_finder->Find(field_number, extension)) {
    return false;
  }

  WireFormatLite::WireType expected_wire_type =
      WireFormatLite::WireTypeForFieldType(real_type(extension->type));

  // Check if this is a packed field.
  *was_packed_on_wire = false;
  if (extension->is_repeated &&
      wire_type == WireFormatLite::WIRETYPE_LENGTH_DELIMITED &&
      is_packable(expected_wire_type)) {
    *was_packed_on_wire = true;
    return true;
  }
  // Otherwise the wire type must match.
  return expected_wire_type == wire_type;
}
|
1154 |
+
|
1155 |
+
// Parses one field from `input` given its `tag`.  Unknown fields (or wire
// type mismatches) are handed to `field_skipper`; known extensions are
// dispatched to ParseFieldWithExtensionInfo().  Returns false on a malformed
// stream.
bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input,
                              ExtensionFinder* extension_finder,
                              FieldSkipper* field_skipper) {
  int number;
  bool was_packed_on_wire;
  ExtensionInfo extension;
  if (!FindExtensionInfoFromTag(
      tag, extension_finder, &number, &extension, &was_packed_on_wire)) {
    return field_skipper->SkipField(input, tag);
  } else {
    return ParseFieldWithExtensionInfo(
        number, was_packed_on_wire, extension, input, field_skipper);
  }
}
|
1169 |
+
|
1170 |
+
// Parses the value(s) of a known extension field from `input` and stores them
// in this set.  Packed runs decode repeated primitives inside a pushed length
// limit; non-packed values decode a single element per call, dispatching on
// the declared field type.  Invalid enum values are routed to
// `field_skipper` as unknown fields.  Returns false on a malformed stream.
bool ExtensionSet::ParseFieldWithExtensionInfo(
    int number, bool was_packed_on_wire, const ExtensionInfo& extension,
    io::CodedInputStream* input,
    FieldSkipper* field_skipper) {
  // Explicitly not read extension.is_packed, instead check whether the field
  // was encoded in packed form on the wire.
  if (was_packed_on_wire) {
    uint32 size;
    if (!input->ReadVarint32(&size)) return false;
    io::CodedInputStream::Limit limit = input->PushLimit(size);

    switch (extension.type) {
      // Packed primitives: read until the length limit is exhausted.
#define HANDLE_TYPE(UPPERCASE, CPP_CAMELCASE, CPP_LOWERCASE)        \
      case WireFormatLite::TYPE_##UPPERCASE:                        \
        while (input->BytesUntilLimit() > 0) {                      \
          CPP_LOWERCASE value;                                      \
          if (!WireFormatLite::ReadPrimitive<                       \
                  CPP_LOWERCASE, WireFormatLite::TYPE_##UPPERCASE>( \
                input, &value)) return false;                       \
          Add##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE, \
                             extension.is_packed, value,            \
                             extension.descriptor);                 \
        }                                                           \
        break

      HANDLE_TYPE(   INT32,  Int32,  int32);
      HANDLE_TYPE(   INT64,  Int64,  int64);
      HANDLE_TYPE(  UINT32, UInt32, uint32);
      HANDLE_TYPE(  UINT64, UInt64, uint64);
      HANDLE_TYPE(  SINT32,  Int32,  int32);
      HANDLE_TYPE(  SINT64,  Int64,  int64);
      HANDLE_TYPE( FIXED32, UInt32, uint32);
      HANDLE_TYPE( FIXED64, UInt64, uint64);
      HANDLE_TYPE(SFIXED32,  Int32,  int32);
      HANDLE_TYPE(SFIXED64,  Int64,  int64);
      HANDLE_TYPE(   FLOAT,  Float,  float);
      HANDLE_TYPE(  DOUBLE, Double, double);
      HANDLE_TYPE(    BOOL,   Bool,   bool);
#undef HANDLE_TYPE

      case WireFormatLite::TYPE_ENUM:
        while (input->BytesUntilLimit() > 0) {
          int value;
          if (!WireFormatLite::ReadPrimitive<int, WireFormatLite::TYPE_ENUM>(
                  input, &value)) return false;
          if (extension.enum_validity_check.func(
                  extension.enum_validity_check.arg, value)) {
            AddEnum(number, WireFormatLite::TYPE_ENUM, extension.is_packed,
                    value, extension.descriptor);
          } else {
            // Invalid value.  Treat as unknown.
            field_skipper->SkipUnknownEnum(number, value);
          }
        }
        break;

      case WireFormatLite::TYPE_STRING:
      case WireFormatLite::TYPE_BYTES:
      case WireFormatLite::TYPE_GROUP:
      case WireFormatLite::TYPE_MESSAGE:
        GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed.";
        break;
    }

    input->PopLimit(limit);
  } else {
    switch (extension.type) {
      // Non-packed primitives: one value; Add for repeated, Set otherwise.
#define HANDLE_TYPE(UPPERCASE, CPP_CAMELCASE, CPP_LOWERCASE)            \
      case WireFormatLite::TYPE_##UPPERCASE: {                          \
        CPP_LOWERCASE value;                                            \
        if (!WireFormatLite::ReadPrimitive<                             \
                CPP_LOWERCASE, WireFormatLite::TYPE_##UPPERCASE>(       \
               input, &value)) return false;                            \
        if (extension.is_repeated) {                                    \
          Add##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE,  \
                             extension.is_packed, value,                \
                             extension.descriptor);                     \
        } else {                                                        \
          Set##CPP_CAMELCASE(number, WireFormatLite::TYPE_##UPPERCASE, value, \
                             extension.descriptor);                     \
        }                                                               \
      } break

      HANDLE_TYPE(   INT32,  Int32,  int32);
      HANDLE_TYPE(   INT64,  Int64,  int64);
      HANDLE_TYPE(  UINT32, UInt32, uint32);
      HANDLE_TYPE(  UINT64, UInt64, uint64);
      HANDLE_TYPE(  SINT32,  Int32,  int32);
      HANDLE_TYPE(  SINT64,  Int64,  int64);
      HANDLE_TYPE( FIXED32, UInt32, uint32);
      HANDLE_TYPE( FIXED64, UInt64, uint64);
      HANDLE_TYPE(SFIXED32,  Int32,  int32);
      HANDLE_TYPE(SFIXED64,  Int64,  int64);
      HANDLE_TYPE(   FLOAT,  Float,  float);
      HANDLE_TYPE(  DOUBLE, Double, double);
      HANDLE_TYPE(    BOOL,   Bool,   bool);
#undef HANDLE_TYPE

      case WireFormatLite::TYPE_ENUM: {
        int value;
        if (!WireFormatLite::ReadPrimitive<int, WireFormatLite::TYPE_ENUM>(
                input, &value)) return false;

        if (!extension.enum_validity_check.func(
                extension.enum_validity_check.arg, value)) {
          // Invalid value.  Treat as unknown.
          field_skipper->SkipUnknownEnum(number, value);
        } else if (extension.is_repeated) {
          AddEnum(number, WireFormatLite::TYPE_ENUM, extension.is_packed, value,
                  extension.descriptor);
        } else {
          SetEnum(number, WireFormatLite::TYPE_ENUM, value,
                  extension.descriptor);
        }
        break;
      }

      case WireFormatLite::TYPE_STRING: {
        string* value = extension.is_repeated ?
          AddString(number, WireFormatLite::TYPE_STRING, extension.descriptor) :
          MutableString(number, WireFormatLite::TYPE_STRING,
                        extension.descriptor);
        if (!WireFormatLite::ReadString(input, value)) return false;
        break;
      }

      case WireFormatLite::TYPE_BYTES: {
        string* value = extension.is_repeated ?
          AddString(number, WireFormatLite::TYPE_BYTES, extension.descriptor) :
          MutableString(number, WireFormatLite::TYPE_BYTES,
                        extension.descriptor);
        if (!WireFormatLite::ReadBytes(input, value)) return false;
        break;
      }

      case WireFormatLite::TYPE_GROUP: {
        MessageLite* value = extension.is_repeated ?
            AddMessage(number, WireFormatLite::TYPE_GROUP,
                       *extension.message_prototype, extension.descriptor) :
            MutableMessage(number, WireFormatLite::TYPE_GROUP,
                           *extension.message_prototype, extension.descriptor);
        if (!WireFormatLite::ReadGroup(number, input, value)) return false;
        break;
      }

      case WireFormatLite::TYPE_MESSAGE: {
        MessageLite* value = extension.is_repeated ?
            AddMessage(number, WireFormatLite::TYPE_MESSAGE,
                       *extension.message_prototype, extension.descriptor) :
            MutableMessage(number, WireFormatLite::TYPE_MESSAGE,
                           *extension.message_prototype, extension.descriptor);
        if (!WireFormatLite::ReadMessage(input, value)) return false;
        break;
      }
    }
  }

  return true;
}
|
1329 |
+
|
1330 |
+
// Lite-runtime convenience overload: parses one field using the generated
// extension registry of `containing_type`, silently dropping unknown fields.
bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input,
                              const MessageLite* containing_type) {
  FieldSkipper skipper;
  GeneratedExtensionFinder finder(containing_type);
  return ParseField(tag, input, &finder, &skipper);
}
|
1336 |
+
|
1337 |
+
// Lite-runtime overload that preserves unknown fields by copying them to the
// `unknown_fields` output stream instead of dropping them.
bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input,
                              const MessageLite* containing_type,
                              io::CodedOutputStream* unknown_fields) {
  CodedOutputStreamFieldSkipper skipper(unknown_fields);
  GeneratedExtensionFinder finder(containing_type);
  return ParseField(tag, input, &finder, &skipper);
}
|
1344 |
+
|
1345 |
+
// Defined in extension_set_heavy.cc.
|
1346 |
+
// bool ExtensionSet::ParseField(uint32 tag, io::CodedInputStream* input,
|
1347 |
+
// const MessageLite* containing_type,
|
1348 |
+
// UnknownFieldSet* unknown_fields)
|
1349 |
+
|
1350 |
+
// Defined in extension_set_heavy.cc.
|
1351 |
+
// bool ExtensionSet::ParseMessageSet(io::CodedInputStream* input,
|
1352 |
+
// const MessageLite* containing_type,
|
1353 |
+
// UnknownFieldSet* unknown_fields);
|
1354 |
+
|
1355 |
+
// Serializes every extension whose field number lies in the half-open range
// [start_field_number, end_field_number), using the sizes computed by a prior
// ByteSize() pass.  Handles both storage representations: the sorted flat
// array (small sets) and the std::map-backed LargeMap (large sets).
void ExtensionSet::SerializeWithCachedSizes(
    int start_field_number, int end_field_number,
    io::CodedOutputStream* output) const {
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    // NOTE: binds a const ref to the temporary returned by end(); lifetime
    // extension keeps it valid for the loop.
    const auto& end = map_.large->end();
    for (auto it = map_.large->lower_bound(start_field_number);
         it != end && it->first < end_field_number; ++it) {
      it->second.SerializeFieldWithCachedSizes(it->first, output);
    }
    return;
  }
  // Flat representation: the array is kept sorted by field number, so a
  // binary search finds the first field in range and iteration stays ordered.
  const KeyValue* end = flat_end();
  for (const KeyValue* it = std::lower_bound(
           flat_begin(), end, start_field_number, KeyValue::FirstComparator());
       it != end && it->first < end_field_number; ++it) {
    it->second.SerializeFieldWithCachedSizes(it->first, output);
  }
}
|
1373 |
+
|
1374 |
+
// Returns the total serialized size of all extensions in this set, visiting
// each stored extension exactly once.
size_t ExtensionSet::ByteSize() const {
  size_t accumulated = 0;
  ForEach([&accumulated](int field_number, const Extension& extension) {
    accumulated += extension.ByteSize(field_number);
  });
  return accumulated;
}
|
1381 |
+
|
1382 |
+
// Defined in extension_set_heavy.cc.
|
1383 |
+
// int ExtensionSet::SpaceUsedExcludingSelf() const
|
1384 |
+
|
1385 |
+
bool ExtensionSet::MaybeNewExtension(int number,
|
1386 |
+
const FieldDescriptor* descriptor,
|
1387 |
+
Extension** result) {
|
1388 |
+
bool extension_is_new = false;
|
1389 |
+
std::tie(*result, extension_is_new) = Insert(number);
|
1390 |
+
(*result)->descriptor = descriptor;
|
1391 |
+
return extension_is_new;
|
1392 |
+
}
|
1393 |
+
|
1394 |
+
// ===================================================================
|
1395 |
+
// Methods of ExtensionSet::Extension
|
1396 |
+
|
1397 |
+
// Clears this extension's value.  Repeated fields are emptied in place;
// singular fields are marked cleared (and heap-backed values are emptied so
// their memory can be reused on the next Set*()).
void ExtensionSet::Extension::Clear() {
  if (is_repeated) {
    switch (cpp_type(type)) {
#define HANDLE_TYPE(UPPERCASE, LOWERCASE)                                   \
      case WireFormatLite::CPPTYPE_##UPPERCASE:                             \
        repeated_##LOWERCASE##_value->Clear();                              \
        break

      HANDLE_TYPE(  INT32,   int32);
      HANDLE_TYPE(  INT64,   int64);
      HANDLE_TYPE( UINT32,  uint32);
      HANDLE_TYPE( UINT64,  uint64);
      HANDLE_TYPE(  FLOAT,   float);
      HANDLE_TYPE( DOUBLE,  double);
      HANDLE_TYPE(   BOOL,    bool);
      HANDLE_TYPE(   ENUM,    enum);
      HANDLE_TYPE( STRING,  string);
      HANDLE_TYPE(MESSAGE, message);
#undef HANDLE_TYPE
    }
  } else {
    if (!is_cleared) {
      switch (cpp_type(type)) {
        case WireFormatLite::CPPTYPE_STRING:
          // Empty the string but keep the allocation for reuse.
          string_value->clear();
          break;
        case WireFormatLite::CPPTYPE_MESSAGE:
          if (is_lazy) {
            lazymessage_value->Clear();
          } else {
            message_value->Clear();
          }
          break;
        default:
          // No need to do anything.  Get*() will return the default value
          // as long as is_cleared is true and Set*() will overwrite the
          // previous value.
          break;
      }

      is_cleared = true;
    }
  }
}
|
1441 |
+
|
1442 |
+
// Writes this extension to `output` as field `number`, using `cached_size`
// computed by the preceding ByteSize() pass (required for packed fields,
// whose length prefix must be known before the payload is written).
void ExtensionSet::Extension::SerializeFieldWithCachedSizes(
    int number,
    io::CodedOutputStream* output) const {
  if (is_repeated) {
    if (is_packed) {
      // Packed: one LENGTH_DELIMITED record containing all elements untagged.
      if (cached_size == 0) return;

      WireFormatLite::WriteTag(number,
          WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
      output->WriteVarint32(cached_size);

      switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) {  \
            WireFormatLite::Write##CAMELCASE##NoTag(                        \
                repeated_##LOWERCASE##_value->Get(i), output);              \
          }                                                                 \
          break

        HANDLE_TYPE(   INT32,    Int32,   int32);
        HANDLE_TYPE(   INT64,    Int64,   int64);
        HANDLE_TYPE(  UINT32,   UInt32,  uint32);
        HANDLE_TYPE(  UINT64,   UInt64,  uint64);
        HANDLE_TYPE(  SINT32,   SInt32,   int32);
        HANDLE_TYPE(  SINT64,   SInt64,   int64);
        HANDLE_TYPE( FIXED32,  Fixed32,  uint32);
        HANDLE_TYPE( FIXED64,  Fixed64,  uint64);
        HANDLE_TYPE(SFIXED32, SFixed32,   int32);
        HANDLE_TYPE(SFIXED64, SFixed64,   int64);
        HANDLE_TYPE(   FLOAT,    Float,   float);
        HANDLE_TYPE(  DOUBLE,   Double,  double);
        HANDLE_TYPE(    BOOL,     Bool,    bool);
        HANDLE_TYPE(    ENUM,     Enum,    enum);
#undef HANDLE_TYPE

        case WireFormatLite::TYPE_STRING:
        case WireFormatLite::TYPE_BYTES:
        case WireFormatLite::TYPE_GROUP:
        case WireFormatLite::TYPE_MESSAGE:
          GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed.";
          break;
      }
    } else {
      // Non-packed repeated: each element is written with its own tag.
      switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) {  \
            WireFormatLite::Write##CAMELCASE(number,                        \
                repeated_##LOWERCASE##_value->Get(i), output);              \
          }                                                                 \
          break

        HANDLE_TYPE(   INT32,    Int32,   int32);
        HANDLE_TYPE(   INT64,    Int64,   int64);
        HANDLE_TYPE(  UINT32,   UInt32,  uint32);
        HANDLE_TYPE(  UINT64,   UInt64,  uint64);
        HANDLE_TYPE(  SINT32,   SInt32,   int32);
        HANDLE_TYPE(  SINT64,   SInt64,   int64);
        HANDLE_TYPE( FIXED32,  Fixed32,  uint32);
        HANDLE_TYPE( FIXED64,  Fixed64,  uint64);
        HANDLE_TYPE(SFIXED32, SFixed32,   int32);
        HANDLE_TYPE(SFIXED64, SFixed64,   int64);
        HANDLE_TYPE(   FLOAT,    Float,   float);
        HANDLE_TYPE(  DOUBLE,   Double,  double);
        HANDLE_TYPE(    BOOL,     Bool,    bool);
        HANDLE_TYPE(  STRING,   String,  string);
        HANDLE_TYPE(   BYTES,    Bytes,  string);
        HANDLE_TYPE(    ENUM,     Enum,    enum);
        HANDLE_TYPE(   GROUP,    Group, message);
        HANDLE_TYPE( MESSAGE,  Message, message);
#undef HANDLE_TYPE
      }
    }
  } else if (!is_cleared) {
    // Singular field: cleared extensions are simply not serialized.
    switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, VALUE)                            \
      case WireFormatLite::TYPE_##UPPERCASE:                                \
        WireFormatLite::Write##CAMELCASE(number, VALUE, output);            \
        break

      HANDLE_TYPE(   INT32,    Int32,  int32_value);
      HANDLE_TYPE(   INT64,    Int64,  int64_value);
      HANDLE_TYPE(  UINT32,   UInt32, uint32_value);
      HANDLE_TYPE(  UINT64,   UInt64, uint64_value);
      HANDLE_TYPE(  SINT32,   SInt32,  int32_value);
      HANDLE_TYPE(  SINT64,   SInt64,  int64_value);
      HANDLE_TYPE( FIXED32,  Fixed32, uint32_value);
      HANDLE_TYPE( FIXED64,  Fixed64, uint64_value);
      HANDLE_TYPE(SFIXED32, SFixed32,  int32_value);
      HANDLE_TYPE(SFIXED64, SFixed64,  int64_value);
      HANDLE_TYPE(   FLOAT,    Float,  float_value);
      HANDLE_TYPE(  DOUBLE,   Double, double_value);
      HANDLE_TYPE(    BOOL,     Bool,   bool_value);
      HANDLE_TYPE(  STRING,   String, *string_value);
      HANDLE_TYPE(   BYTES,    Bytes, *string_value);
      HANDLE_TYPE(    ENUM,     Enum,   enum_value);
      HANDLE_TYPE(   GROUP,    Group, *message_value);
#undef HANDLE_TYPE
      case WireFormatLite::TYPE_MESSAGE:
        // Lazy messages may serialize without being fully parsed.
        if (is_lazy) {
          lazymessage_value->WriteMessage(number, output);
        } else {
          WireFormatLite::WriteMessage(number, *message_value, output);
        }
        break;
    }
  }
}
|
1551 |
+
|
1552 |
+
// Computes the serialized size of this extension when written as field
// `number`.  Side effect: for packed repeated fields it also stores the
// payload size into `cached_size`, which SerializeFieldWithCachedSizes()
// later uses as the length prefix — so a ByteSize() pass must precede
// serialization.
size_t ExtensionSet::Extension::ByteSize(int number) const {
  size_t result = 0;

  if (is_repeated) {
    if (is_packed) {
      switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) {  \
            result += WireFormatLite::CAMELCASE##Size(                      \
                repeated_##LOWERCASE##_value->Get(i));                      \
          }                                                                 \
          break

        HANDLE_TYPE(   INT32,    Int32,   int32);
        HANDLE_TYPE(   INT64,    Int64,   int64);
        HANDLE_TYPE(  UINT32,   UInt32,  uint32);
        HANDLE_TYPE(  UINT64,   UInt64,  uint64);
        HANDLE_TYPE(  SINT32,   SInt32,   int32);
        HANDLE_TYPE(  SINT64,   SInt64,   int64);
        HANDLE_TYPE(    ENUM,     Enum,    enum);
#undef HANDLE_TYPE

        // Stuff with fixed size.
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          result += WireFormatLite::k##CAMELCASE##Size *                    \
                    FromIntSize(repeated_##LOWERCASE##_value->size());      \
          break
        HANDLE_TYPE( FIXED32,  Fixed32,  uint32);
        HANDLE_TYPE( FIXED64,  Fixed64,  uint64);
        HANDLE_TYPE(SFIXED32, SFixed32,   int32);
        HANDLE_TYPE(SFIXED64, SFixed64,   int64);
        HANDLE_TYPE(   FLOAT,    Float,   float);
        HANDLE_TYPE(  DOUBLE,   Double,  double);
        HANDLE_TYPE(    BOOL,     Bool,    bool);
#undef HANDLE_TYPE

        case WireFormatLite::TYPE_STRING:
        case WireFormatLite::TYPE_BYTES:
        case WireFormatLite::TYPE_GROUP:
        case WireFormatLite::TYPE_MESSAGE:
          GOOGLE_LOG(FATAL) << "Non-primitive types can't be packed.";
          break;
      }

      // Record the payload size before adding the tag + length-prefix bytes.
      // NOTE(review): VarintSize32 receives a size_t `result`; assumes the
      // payload fits in 32 bits, as elsewhere in this runtime.
      cached_size = ToCachedSize(result);
      if (result > 0) {
        result += io::CodedOutputStream::VarintSize32(result);
        result += io::CodedOutputStream::VarintSize32(
            WireFormatLite::MakeTag(number,
                WireFormatLite::WIRETYPE_LENGTH_DELIMITED));
      }
    } else {
      // Non-packed repeated: every element carries its own tag.
      size_t tag_size = WireFormatLite::TagSize(number, real_type(type));

      switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          result += tag_size *                                              \
                    FromIntSize(repeated_##LOWERCASE##_value->size());      \
          for (int i = 0; i < repeated_##LOWERCASE##_value->size(); i++) {  \
            result += WireFormatLite::CAMELCASE##Size(                      \
                repeated_##LOWERCASE##_value->Get(i));                      \
          }                                                                 \
          break

        HANDLE_TYPE(   INT32,    Int32,   int32);
        HANDLE_TYPE(   INT64,    Int64,   int64);
        HANDLE_TYPE(  UINT32,   UInt32,  uint32);
        HANDLE_TYPE(  UINT64,   UInt64,  uint64);
        HANDLE_TYPE(  SINT32,   SInt32,   int32);
        HANDLE_TYPE(  SINT64,   SInt64,   int64);
        HANDLE_TYPE(  STRING,   String,  string);
        HANDLE_TYPE(   BYTES,    Bytes,  string);
        HANDLE_TYPE(    ENUM,     Enum,    enum);
        HANDLE_TYPE(   GROUP,    Group, message);
        HANDLE_TYPE( MESSAGE,  Message, message);
#undef HANDLE_TYPE

        // Stuff with fixed size.
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
        case WireFormatLite::TYPE_##UPPERCASE:                              \
          result += (tag_size + WireFormatLite::k##CAMELCASE##Size) *       \
                    FromIntSize(repeated_##LOWERCASE##_value->size());      \
          break
        HANDLE_TYPE( FIXED32,  Fixed32,  uint32);
        HANDLE_TYPE( FIXED64,  Fixed64,  uint64);
        HANDLE_TYPE(SFIXED32, SFixed32,   int32);
        HANDLE_TYPE(SFIXED64, SFixed64,   int64);
        HANDLE_TYPE(   FLOAT,    Float,   float);
        HANDLE_TYPE(  DOUBLE,   Double,  double);
        HANDLE_TYPE(    BOOL,     Bool,    bool);
#undef HANDLE_TYPE
      }
    }
  } else if (!is_cleared) {
    // Singular set field: one tag plus the value's encoded size.
    result += WireFormatLite::TagSize(number, real_type(type));
    switch (real_type(type)) {
#define HANDLE_TYPE(UPPERCASE, CAMELCASE, LOWERCASE)                        \
      case WireFormatLite::TYPE_##UPPERCASE:                                \
        result += WireFormatLite::CAMELCASE##Size(LOWERCASE);               \
        break

      HANDLE_TYPE(   INT32,    Int32,  int32_value);
      HANDLE_TYPE(   INT64,    Int64,  int64_value);
      HANDLE_TYPE(  UINT32,   UInt32, uint32_value);
      HANDLE_TYPE(  UINT64,   UInt64, uint64_value);
      HANDLE_TYPE(  SINT32,   SInt32,  int32_value);
      HANDLE_TYPE(  SINT64,   SInt64,  int64_value);
      HANDLE_TYPE(  STRING,   String, *string_value);
      HANDLE_TYPE(   BYTES,    Bytes, *string_value);
      HANDLE_TYPE(    ENUM,     Enum,   enum_value);
      HANDLE_TYPE(   GROUP,    Group, *message_value);
#undef HANDLE_TYPE
      case WireFormatLite::TYPE_MESSAGE: {
        if (is_lazy) {
          size_t size = lazymessage_value->ByteSize();
          result += io::CodedOutputStream::VarintSize32(size) + size;
        } else {
          result += WireFormatLite::MessageSize(*message_value);
        }
        break;
      }

      // Stuff with fixed size.
#define HANDLE_TYPE(UPPERCASE, CAMELCASE)                                   \
      case WireFormatLite::TYPE_##UPPERCASE:                                \
        result += WireFormatLite::k##CAMELCASE##Size;                       \
        break
      HANDLE_TYPE( FIXED32,  Fixed32);
      HANDLE_TYPE( FIXED64,  Fixed64);
      HANDLE_TYPE(SFIXED32, SFixed32);
      HANDLE_TYPE(SFIXED64, SFixed64);
      HANDLE_TYPE(   FLOAT,    Float);
      HANDLE_TYPE(  DOUBLE,   Double);
      HANDLE_TYPE(    BOOL,     Bool);
#undef HANDLE_TYPE
    }
  }

  return result;
}
|
1695 |
+
|
1696 |
+
// Returns the element count of a repeated extension.  Must only be called
// when is_repeated is set (enforced by the DCHECK below).
int ExtensionSet::Extension::GetSize() const {
  GOOGLE_DCHECK(is_repeated);
  switch (cpp_type(type)) {
#define HANDLE_TYPE(UPPERCASE, LOWERCASE)                                   \
    case WireFormatLite::CPPTYPE_##UPPERCASE:                               \
      return repeated_##LOWERCASE##_value->size()

    HANDLE_TYPE(  INT32,   int32);
    HANDLE_TYPE(  INT64,   int64);
    HANDLE_TYPE( UINT32,  uint32);
    HANDLE_TYPE( UINT64,  uint64);
    HANDLE_TYPE(  FLOAT,   float);
    HANDLE_TYPE( DOUBLE,  double);
    HANDLE_TYPE(   BOOL,    bool);
    HANDLE_TYPE(   ENUM,    enum);
    HANDLE_TYPE( STRING,  string);
    HANDLE_TYPE(MESSAGE, message);
#undef HANDLE_TYPE
  }

  // Every CppType is handled above; reaching this point indicates memory
  // corruption or an invalid `type` value.
  GOOGLE_LOG(FATAL) << "Can't get here.";
  return 0;
}
|
1719 |
+
|
1720 |
+
// This function deletes all allocated objects.
// NOTE(review): the body unconditionally `delete`s heap values, so this must
// only run for Extensions created WITHOUT an arena (arena-owned memory is
// released by the arena itself).  The original comment said "created with an
// arena", which appears inverted — confirm against the call sites.
void ExtensionSet::Extension::Free() {
  if (is_repeated) {
    switch (cpp_type(type)) {
#define HANDLE_TYPE(UPPERCASE, LOWERCASE)                                   \
      case WireFormatLite::CPPTYPE_##UPPERCASE:                             \
        delete repeated_##LOWERCASE##_value;                                \
        break

      HANDLE_TYPE(  INT32,   int32);
      HANDLE_TYPE(  INT64,   int64);
      HANDLE_TYPE( UINT32,  uint32);
      HANDLE_TYPE( UINT64,  uint64);
      HANDLE_TYPE(  FLOAT,   float);
      HANDLE_TYPE( DOUBLE,  double);
      HANDLE_TYPE(   BOOL,    bool);
      HANDLE_TYPE(   ENUM,    enum);
      HANDLE_TYPE( STRING,  string);
      HANDLE_TYPE(MESSAGE, message);
#undef HANDLE_TYPE
    }
  } else {
    switch (cpp_type(type)) {
      case WireFormatLite::CPPTYPE_STRING:
        delete string_value;
        break;
      case WireFormatLite::CPPTYPE_MESSAGE:
        // is_lazy selects which member of the value union is active.
        if (is_lazy) {
          delete lazymessage_value;
        } else {
          delete message_value;
        }
        break;
      default:
        // Scalar types are stored inline in the union; nothing to free.
        break;
    }
  }
}
|
1759 |
+
|
1760 |
+
// Defined in extension_set_heavy.cc.
|
1761 |
+
// int ExtensionSet::Extension::SpaceUsedExcludingSelf() const
|
1762 |
+
|
1763 |
+
bool ExtensionSet::Extension::IsInitialized() const {
|
1764 |
+
if (cpp_type(type) == WireFormatLite::CPPTYPE_MESSAGE) {
|
1765 |
+
if (is_repeated) {
|
1766 |
+
for (int i = 0; i < repeated_message_value->size(); i++) {
|
1767 |
+
if (!repeated_message_value->Get(i).IsInitialized()) {
|
1768 |
+
return false;
|
1769 |
+
}
|
1770 |
+
}
|
1771 |
+
} else {
|
1772 |
+
if (!is_cleared) {
|
1773 |
+
if (is_lazy) {
|
1774 |
+
if (!lazymessage_value->IsInitialized()) return false;
|
1775 |
+
} else {
|
1776 |
+
if (!message_value->IsInitialized()) return false;
|
1777 |
+
}
|
1778 |
+
}
|
1779 |
+
}
|
1780 |
+
}
|
1781 |
+
return true;
|
1782 |
+
}
|
1783 |
+
|
1784 |
+
// Dummy key method to avoid weak vtable: defining one virtual member
// out-of-line anchors LazyMessageExtension's vtable in this translation unit
// instead of emitting a copy in every TU that uses the class.
void ExtensionSet::LazyMessageExtension::UnusedKeyMethod() {}
|
1786 |
+
|
1787 |
+
// Read-only lookup: returns the Extension stored for `key`, or NULL when the
// field number is absent.
const ExtensionSet::Extension* ExtensionSet::FindOrNull(int key) const {
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    return FindOrNullInLargeMap(key);
  }
  const KeyValue* const last = flat_end();
  const KeyValue* const pos =
      std::lower_bound(flat_begin(), last, key, KeyValue::FirstComparator());
  if (pos != last && pos->first == key) {
    return &pos->second;
  }
  return NULL;
}
|
1799 |
+
|
1800 |
+
// Read-only lookup in the LargeMap representation; precondition: is_large().
const ExtensionSet::Extension* ExtensionSet::FindOrNullInLargeMap(
    int key) const {
  assert(is_large());
  const LargeMap::const_iterator pos = map_.large->find(key);
  if (pos == map_.large->end()) {
    return NULL;
  }
  return &pos->second;
}
|
1809 |
+
|
1810 |
+
// Mutable lookup: returns the Extension stored for `key`, or NULL when the
// field number is absent.
ExtensionSet::Extension* ExtensionSet::FindOrNull(int key) {
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    return FindOrNullInLargeMap(key);
  }
  KeyValue* const last = flat_end();
  KeyValue* const pos =
      std::lower_bound(flat_begin(), last, key, KeyValue::FirstComparator());
  if (pos != last && pos->first == key) {
    return &pos->second;
  }
  return NULL;
}
|
1822 |
+
|
1823 |
+
// Mutable lookup in the LargeMap representation; precondition: is_large().
ExtensionSet::Extension* ExtensionSet::FindOrNullInLargeMap(int key) {
  assert(is_large());
  const LargeMap::iterator pos = map_.large->find(key);
  if (pos == map_.large->end()) {
    return NULL;
  }
  return &pos->second;
}
|
1831 |
+
|
1832 |
+
// Finds or creates the slot for `key`.  Returns {slot, inserted}: `inserted`
// is true iff a new default-constructed Extension was created by this call.
std::pair<ExtensionSet::Extension*, bool> ExtensionSet::Insert(int key) {
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    auto maybe = map_.large->insert({key, Extension()});
    return {&maybe.first->second, maybe.second};
  }
  KeyValue* end = flat_end();
  KeyValue* it =
      std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
  if (it != end && it->first == key) {
    return {&it->second, false};
  }
  if (flat_size_ < flat_capacity_) {
    // Shift the tail right by one to open a slot at the sorted position,
    // then construct the new entry in place.
    std::copy_backward(it, end, end + 1);
    ++flat_size_;
    it->first = key;
    it->second = Extension();
    return {&it->second, true};
  }
  // Flat array is full: grow (possibly converting to LargeMap), then retry.
  // The recursion terminates because capacity is now >= flat_size_ + 1 or
  // the representation switched to the map.
  GrowCapacity(flat_size_ + 1);
  return Insert(key);
}
|
1853 |
+
|
1854 |
+
// Ensures the flat array can hold at least `minimum_new_capacity` entries,
// growing geometrically (x4) and converting to the LargeMap representation
// once the capacity would exceed kMaximumFlatCapacity.
void ExtensionSet::GrowCapacity(size_t minimum_new_capacity) {
  if (GOOGLE_PREDICT_FALSE(is_large())) {
    return;  // LargeMap does not have a "reserve" method.
  }
  if (flat_capacity_ >= minimum_new_capacity) {
    return;
  }

  do {
    flat_capacity_ = flat_capacity_ == 0 ? 1 : flat_capacity_ * 4;
  } while (flat_capacity_ < minimum_new_capacity);

  // Keep pointers to the old storage so it can be copied from and, for
  // non-arena sets, freed afterwards.
  const KeyValue* begin = flat_begin();
  const KeyValue* end = flat_end();
  if (flat_capacity_ > kMaximumFlatCapacity) {
    // Switch to LargeMap
    map_.large = ::google::protobuf::Arena::Create<LargeMap>(arena_);
    LargeMap::iterator hint = map_.large->begin();
    for (const KeyValue* it = begin; it != end; ++it) {
      // Entries arrive in sorted order, so inserting with a hint is O(1).
      hint = map_.large->insert(hint, {it->first, it->second});
    }
    // flat_size_ == 0 is what makes is_large() report the map representation.
    flat_size_ = 0;
  } else {
    map_.flat = ::google::protobuf::Arena::CreateArray<KeyValue>(arena_, flat_capacity_);
    std::copy(begin, end, map_.flat);
  }
  // Old flat storage is heap-allocated only when there is no arena.
  if (arena_ == NULL) delete[] begin;
}
|
1882 |
+
|
1883 |
+
// static
// Out-of-class definition required for ODR-use of the in-class constexpr
// member prior to C++17.
constexpr uint16 ExtensionSet::kMaximumFlatCapacity;
|
1885 |
+
|
1886 |
+
void ExtensionSet::Erase(int key) {
|
1887 |
+
if (GOOGLE_PREDICT_FALSE(is_large())) {
|
1888 |
+
map_.large->erase(key);
|
1889 |
+
return;
|
1890 |
+
}
|
1891 |
+
KeyValue* end = flat_end();
|
1892 |
+
KeyValue* it =
|
1893 |
+
std::lower_bound(flat_begin(), end, key, KeyValue::FirstComparator());
|
1894 |
+
if (it != end && it->first == key) {
|
1895 |
+
std::copy(it + 1, end, it);
|
1896 |
+
--flat_size_;
|
1897 |
+
}
|
1898 |
+
}
|
1899 |
+
|
1900 |
+
// ==================================================================
|
1901 |
+
// Default repeated field instances for iterator-compatible accessors
|
1902 |
+
|
1903 |
+
// Lazily-created process-wide singleton; registered with OnShutdownDelete so
// leak checkers see it released at shutdown.
const RepeatedPrimitiveDefaults* RepeatedPrimitiveDefaults::default_instance() {
  static auto singleton = OnShutdownDelete(new RepeatedPrimitiveDefaults);
  return singleton;
}
|
1907 |
+
|
1908 |
+
// Lazily-created shared empty repeated-string field; registered with
// OnShutdownDelete so leak checkers see it released at shutdown.
const RepeatedStringTypeTraits::RepeatedFieldType*
RepeatedStringTypeTraits::GetDefaultRepeatedField() {
  static auto singleton = OnShutdownDelete(new RepeatedFieldType);
  return singleton;
}
|
1913 |
+
|
1914 |
+
} // namespace internal
|
1915 |
+
} // namespace protobuf
|
1916 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/generated_message_util.cc
ADDED
@@ -0,0 +1,814 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
|
35 |
+
#include <google/protobuf/generated_message_util.h>
|
36 |
+
|
37 |
+
#include <limits>
|
38 |
+
// We're only using this as a standard way for getting the thread id.
|
39 |
+
// We're not using any thread functionality.
|
40 |
+
#include <thread> // NOLINT
|
41 |
+
#include <vector>
|
42 |
+
|
43 |
+
#include <google/protobuf/io/coded_stream_inl.h>
|
44 |
+
#include <google/protobuf/io/coded_stream.h>
|
45 |
+
#include <google/protobuf/arenastring.h>
|
46 |
+
#include <google/protobuf/extension_set.h>
|
47 |
+
#include <google/protobuf/message_lite.h>
|
48 |
+
#include <google/protobuf/metadata_lite.h>
|
49 |
+
#include <google/protobuf/stubs/mutex.h>
|
50 |
+
#include <google/protobuf/stubs/port.h>
|
51 |
+
#include <google/protobuf/repeated_field.h>
|
52 |
+
#include <google/protobuf/wire_format_lite.h>
|
53 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
54 |
+
|
55 |
+
namespace google {
|
56 |
+
|
57 |
+
namespace protobuf {
|
58 |
+
namespace internal {
|
59 |
+
|
60 |
+
void DestroyMessage(const void* message) {
|
61 |
+
static_cast<const MessageLite*>(message)->~MessageLite();
|
62 |
+
}
|
63 |
+
void DestroyString(const void* s) { static_cast<const string*>(s)->~string(); }
|
64 |
+
|
65 |
+
// Process-wide canonical empty string at a fixed address.  Wrapped in
// ExplicitlyConstructed so construction is triggered manually (see
// InitProtobufDefaultsImpl) rather than depending on static-init order.
ExplicitlyConstructed<std::string> fixed_address_empty_string;
|
66 |
+
|
67 |
+
// Returns IEEE-754 positive infinity as a double.
double Infinity() {
  constexpr double kInfinity = std::numeric_limits<double>::infinity();
  return kInfinity;
}
|
70 |
+
// Returns an IEEE-754 quiet NaN as a double.
double NaN() {
  constexpr double kNaN = std::numeric_limits<double>::quiet_NaN();
  return kNaN;
}
|
73 |
+
|
74 |
+
// One-shot initializer: constructs the canonical empty string and registers
// its destruction for library shutdown.  The bool return exists solely so it
// can drive a function-local `static` in InitProtobufDefaults().
static bool InitProtobufDefaultsImpl() {
  fixed_address_empty_string.DefaultConstruct();
  std::string* empty = fixed_address_empty_string.get_mutable();
  OnShutdownDestroyString(empty);
  return true;
}
|
79 |
+
|
80 |
+
void InitProtobufDefaults() {
|
81 |
+
static bool is_inited = InitProtobufDefaultsImpl();
|
82 |
+
(void)is_inited;
|
83 |
+
}
|
84 |
+
|
85 |
+
size_t StringSpaceUsedExcludingSelfLong(const string& str) {
|
86 |
+
const void* start = &str;
|
87 |
+
const void* end = &str + 1;
|
88 |
+
if (start <= str.data() && str.data() < end) {
|
89 |
+
// The string's data is stored inside the string object itself.
|
90 |
+
return 0;
|
91 |
+
} else {
|
92 |
+
return str.capacity();
|
93 |
+
}
|
94 |
+
}
|
95 |
+
|
96 |
+
// Reinterprets a type-erased pointer as a const reference to T.  The caller
// guarantees that `ptr` actually points at a T.
template <typename T>
const T& Get(const void* ptr) {
  const T* typed = static_cast<const T*>(ptr);
  return *typed;
}
|
100 |
+
|
101 |
+
// PrimitiveTypeHelper is a wrapper around the interface of WireFormatLite.
// WireFormatLite has a very inconvenient interface with respect to template
// meta-programming. This class wraps the different named functions into
// a single Serialize / SerializeToArray interface.
// Only the explicit specializations that follow are defined; instantiating
// the primary template for an unsupported wire type is a compile-time error.
template <int type>
struct PrimitiveTypeHelper;
|
107 |
+
|
108 |
+
template <>
|
109 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_BOOL> {
|
110 |
+
typedef bool Type;
|
111 |
+
static void Serialize(const void* ptr,
|
112 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
113 |
+
WireFormatLite::WriteBoolNoTag(Get<bool>(ptr), output);
|
114 |
+
}
|
115 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
116 |
+
return WireFormatLite::WriteBoolNoTagToArray(Get<Type>(ptr), buffer);
|
117 |
+
}
|
118 |
+
};
|
119 |
+
|
120 |
+
template <>
|
121 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_INT32> {
|
122 |
+
typedef int32 Type;
|
123 |
+
static void Serialize(const void* ptr,
|
124 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
125 |
+
WireFormatLite::WriteInt32NoTag(Get<int32>(ptr), output);
|
126 |
+
}
|
127 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
128 |
+
return WireFormatLite::WriteInt32NoTagToArray(Get<Type>(ptr), buffer);
|
129 |
+
}
|
130 |
+
};
|
131 |
+
|
132 |
+
template <>
|
133 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_SINT32> {
|
134 |
+
typedef int32 Type;
|
135 |
+
static void Serialize(const void* ptr,
|
136 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
137 |
+
WireFormatLite::WriteSInt32NoTag(Get<int32>(ptr), output);
|
138 |
+
}
|
139 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
140 |
+
return WireFormatLite::WriteSInt32NoTagToArray(Get<Type>(ptr), buffer);
|
141 |
+
}
|
142 |
+
};
|
143 |
+
|
144 |
+
template <>
|
145 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_UINT32> {
|
146 |
+
typedef uint32 Type;
|
147 |
+
static void Serialize(const void* ptr,
|
148 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
149 |
+
WireFormatLite::WriteUInt32NoTag(Get<uint32>(ptr), output);
|
150 |
+
}
|
151 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
152 |
+
return WireFormatLite::WriteUInt32NoTagToArray(Get<Type>(ptr), buffer);
|
153 |
+
}
|
154 |
+
};
|
155 |
+
template <>
|
156 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_INT64> {
|
157 |
+
typedef int64 Type;
|
158 |
+
static void Serialize(const void* ptr,
|
159 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
160 |
+
WireFormatLite::WriteInt64NoTag(Get<int64>(ptr), output);
|
161 |
+
}
|
162 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
163 |
+
return WireFormatLite::WriteInt64NoTagToArray(Get<Type>(ptr), buffer);
|
164 |
+
}
|
165 |
+
};
|
166 |
+
|
167 |
+
template <>
|
168 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_SINT64> {
|
169 |
+
typedef int64 Type;
|
170 |
+
static void Serialize(const void* ptr,
|
171 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
172 |
+
WireFormatLite::WriteSInt64NoTag(Get<int64>(ptr), output);
|
173 |
+
}
|
174 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
175 |
+
return WireFormatLite::WriteSInt64NoTagToArray(Get<Type>(ptr), buffer);
|
176 |
+
}
|
177 |
+
};
|
178 |
+
template <>
|
179 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_UINT64> {
|
180 |
+
typedef uint64 Type;
|
181 |
+
static void Serialize(const void* ptr,
|
182 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
183 |
+
WireFormatLite::WriteUInt64NoTag(Get<uint64>(ptr), output);
|
184 |
+
}
|
185 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
186 |
+
return WireFormatLite::WriteUInt64NoTagToArray(Get<Type>(ptr), buffer);
|
187 |
+
}
|
188 |
+
};
|
189 |
+
|
190 |
+
template <>
|
191 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
|
192 |
+
typedef uint32 Type;
|
193 |
+
static void Serialize(const void* ptr,
|
194 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
195 |
+
WireFormatLite::WriteFixed32NoTag(Get<uint32>(ptr), output);
|
196 |
+
}
|
197 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
198 |
+
return WireFormatLite::WriteFixed32NoTagToArray(Get<Type>(ptr), buffer);
|
199 |
+
}
|
200 |
+
};
|
201 |
+
|
202 |
+
template <>
|
203 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
|
204 |
+
typedef uint64 Type;
|
205 |
+
static void Serialize(const void* ptr,
|
206 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
207 |
+
WireFormatLite::WriteFixed64NoTag(Get<uint64>(ptr), output);
|
208 |
+
}
|
209 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
210 |
+
return WireFormatLite::WriteFixed64NoTagToArray(Get<Type>(ptr), buffer);
|
211 |
+
}
|
212 |
+
};
|
213 |
+
|
214 |
+
template <>
|
215 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_ENUM>
|
216 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_INT32> {};
|
217 |
+
|
218 |
+
template <>
|
219 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_SFIXED32>
|
220 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
|
221 |
+
typedef int32 Type;
|
222 |
+
};
|
223 |
+
template <>
|
224 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_SFIXED64>
|
225 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
|
226 |
+
typedef int64 Type;
|
227 |
+
};
|
228 |
+
template <>
|
229 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_FLOAT>
|
230 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED32> {
|
231 |
+
typedef float Type;
|
232 |
+
};
|
233 |
+
template <>
|
234 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_DOUBLE>
|
235 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_FIXED64> {
|
236 |
+
typedef double Type;
|
237 |
+
};
|
238 |
+
|
239 |
+
template <>
|
240 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {
|
241 |
+
typedef string Type;
|
242 |
+
static void Serialize(const void* ptr,
|
243 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
244 |
+
const Type& value = *static_cast<const Type*>(ptr);
|
245 |
+
output->WriteVarint32(value.size());
|
246 |
+
output->WriteRawMaybeAliased(value.data(), value.size());
|
247 |
+
}
|
248 |
+
static uint8* SerializeToArray(const void* ptr, uint8* buffer) {
|
249 |
+
const Type& value = *static_cast<const Type*>(ptr);
|
250 |
+
return io::CodedOutputStream::WriteStringWithSizeToArray(value, buffer);
|
251 |
+
}
|
252 |
+
};
|
253 |
+
|
254 |
+
template <>
|
255 |
+
struct PrimitiveTypeHelper<WireFormatLite::TYPE_BYTES>
|
256 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {};
|
257 |
+
|
258 |
+
|
259 |
+
template <>
|
260 |
+
struct PrimitiveTypeHelper<FieldMetadata::kInlinedType>
|
261 |
+
: PrimitiveTypeHelper<WireFormatLite::TYPE_STRING> {};
|
262 |
+
|
263 |
+
// We want to serialize to both CodedOutputStream and directly into byte arrays
|
264 |
+
// without duplicating the code. In fact we might want extra output channels in
|
265 |
+
// the future.
|
266 |
+
template <typename O, int type>
|
267 |
+
struct OutputHelper;
|
268 |
+
|
269 |
+
template <int type, typename O>
|
270 |
+
void SerializeTo(const void* ptr, O* output) {
|
271 |
+
OutputHelper<O, type>::Serialize(ptr, output);
|
272 |
+
}
|
273 |
+
|
274 |
+
template <typename O>
|
275 |
+
void WriteTagTo(uint32 tag, O* output) {
|
276 |
+
SerializeTo<WireFormatLite::TYPE_UINT32>(&tag, output);
|
277 |
+
}
|
278 |
+
|
279 |
+
template <typename O>
|
280 |
+
void WriteLengthTo(uint32 length, O* output) {
|
281 |
+
SerializeTo<WireFormatLite::TYPE_UINT32>(&length, output);
|
282 |
+
}
|
283 |
+
|
284 |
+
// Specialization for coded output stream
|
285 |
+
template <int type>
|
286 |
+
struct OutputHelper<::google::protobuf::io::CodedOutputStream, type> {
|
287 |
+
static void Serialize(const void* ptr,
|
288 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
289 |
+
PrimitiveTypeHelper<type>::Serialize(ptr, output);
|
290 |
+
}
|
291 |
+
};
|
292 |
+
|
293 |
+
// Specialization for writing into a plain array
|
294 |
+
struct ArrayOutput {
|
295 |
+
uint8* ptr;
|
296 |
+
bool is_deterministic;
|
297 |
+
};
|
298 |
+
|
299 |
+
template <int type>
|
300 |
+
struct OutputHelper<ArrayOutput, type> {
|
301 |
+
static void Serialize(const void* ptr, ArrayOutput* output) {
|
302 |
+
output->ptr = PrimitiveTypeHelper<type>::SerializeToArray(ptr, output->ptr);
|
303 |
+
}
|
304 |
+
};
|
305 |
+
|
306 |
+
void SerializeMessageNoTable(const MessageLite* msg,
|
307 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
308 |
+
msg->SerializeWithCachedSizes(output);
|
309 |
+
}
|
310 |
+
|
311 |
+
void SerializeMessageNoTable(const MessageLite* msg, ArrayOutput* output) {
|
312 |
+
output->ptr = msg->InternalSerializeWithCachedSizesToArray(
|
313 |
+
output->is_deterministic, output->ptr);
|
314 |
+
}
|
315 |
+
|
316 |
+
// Helper to branch to fast path if possible
|
317 |
+
// Helper to branch to the fast path if possible: when the stream can hand us
// a flat buffer big enough for the whole (cached-size) message we serialize
// straight into it; otherwise we fall back to the table-driven serializer.
void SerializeMessageDispatch(const ::google::protobuf::MessageLite& msg,
                              const FieldMetadata* field_table, int num_fields,
                              int32 cached_size,
                              ::google::protobuf::io::CodedOutputStream* output) {
  const uint8* base = reinterpret_cast<const uint8*>(&msg);
  // Try the fast path.
  uint8* ptr = output->GetDirectBufferForNBytesAndAdvance(cached_size);
  if (ptr != NULL) {
    // We use virtual dispatch so generated code can supply a dedicated fast
    // path for this message type.
    msg.InternalSerializeWithCachedSizesToArray(
        output->IsSerializationDeterministic(), ptr);
    return;
  }
  SerializeInternal(base, field_table, num_fields, output);
}
|
333 |
+
|
334 |
+
// Helper to branch to fast path if possible
|
335 |
+
// Array-output overload: the output is already a flat buffer, so this is
// always the table-driven direct path. |cached_size| is unused here but kept
// so both overloads share a signature shape.
void SerializeMessageDispatch(const ::google::protobuf::MessageLite& msg,
                              const FieldMetadata* field_table, int num_fields,
                              int32 cached_size, ArrayOutput* output) {
  const uint8* base = reinterpret_cast<const uint8*>(&msg);
  output->ptr = SerializeInternalToArray(base, field_table, num_fields,
                                         output->is_deterministic, output->ptr);
}
|
342 |
+
|
343 |
+
// Serializing messages is special as it's not a primitive type and needs an
|
344 |
+
// explicit overload for each output type.
|
345 |
+
template <typename O>
|
346 |
+
void SerializeMessageTo(const MessageLite* msg, const void* table_ptr,
|
347 |
+
O* output) {
|
348 |
+
const SerializationTable* table =
|
349 |
+
static_cast<const SerializationTable*>(table_ptr);
|
350 |
+
if (!table) {
|
351 |
+
// Proto1
|
352 |
+
WriteLengthTo(msg->GetCachedSize(), output);
|
353 |
+
SerializeMessageNoTable(msg, output);
|
354 |
+
return;
|
355 |
+
}
|
356 |
+
const FieldMetadata* field_table = table->field_table;
|
357 |
+
const uint8* base = reinterpret_cast<const uint8*>(msg);
|
358 |
+
int cached_size = *reinterpret_cast<const int32*>(base + field_table->offset);
|
359 |
+
WriteLengthTo(cached_size, output);
|
360 |
+
int num_fields = table->num_fields - 1;
|
361 |
+
SerializeMessageDispatch(*msg, field_table + 1, num_fields, cached_size,
|
362 |
+
output);
|
363 |
+
}
|
364 |
+
|
365 |
+
// Almost the same as above only it doesn't output the length field.
|
366 |
+
template <typename O>
|
367 |
+
void SerializeGroupTo(const MessageLite* msg, const void* table_ptr,
|
368 |
+
O* output) {
|
369 |
+
const SerializationTable* table =
|
370 |
+
static_cast<const SerializationTable*>(table_ptr);
|
371 |
+
if (!table) {
|
372 |
+
// Proto1
|
373 |
+
SerializeMessageNoTable(msg, output);
|
374 |
+
return;
|
375 |
+
}
|
376 |
+
const FieldMetadata* field_table = table->field_table;
|
377 |
+
const uint8* base = reinterpret_cast<const uint8*>(msg);
|
378 |
+
int cached_size = *reinterpret_cast<const int32*>(base + field_table->offset);
|
379 |
+
int num_fields = table->num_fields - 1;
|
380 |
+
SerializeMessageDispatch(*msg, field_table + 1, num_fields, cached_size,
|
381 |
+
output);
|
382 |
+
}
|
383 |
+
|
384 |
+
template <int type>
|
385 |
+
struct SingularFieldHelper {
|
386 |
+
template <typename O>
|
387 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
388 |
+
WriteTagTo(md.tag, output);
|
389 |
+
SerializeTo<type>(field, output);
|
390 |
+
}
|
391 |
+
};
|
392 |
+
|
393 |
+
template <>
|
394 |
+
struct SingularFieldHelper<WireFormatLite::TYPE_STRING> {
|
395 |
+
template <typename O>
|
396 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
397 |
+
WriteTagTo(md.tag, output);
|
398 |
+
SerializeTo<WireFormatLite::TYPE_STRING>(&Get<ArenaStringPtr>(field).Get(),
|
399 |
+
output);
|
400 |
+
}
|
401 |
+
};
|
402 |
+
|
403 |
+
template <>
|
404 |
+
struct SingularFieldHelper<WireFormatLite::TYPE_BYTES>
|
405 |
+
: SingularFieldHelper<WireFormatLite::TYPE_STRING> {};
|
406 |
+
|
407 |
+
template <>
|
408 |
+
struct SingularFieldHelper<WireFormatLite::TYPE_GROUP> {
|
409 |
+
template <typename O>
|
410 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
411 |
+
WriteTagTo(md.tag, output);
|
412 |
+
SerializeGroupTo(Get<const MessageLite*>(field),
|
413 |
+
static_cast<const SerializationTable*>(md.ptr), output);
|
414 |
+
WriteTagTo(md.tag + 1, output);
|
415 |
+
}
|
416 |
+
};
|
417 |
+
|
418 |
+
template <>
|
419 |
+
struct SingularFieldHelper<WireFormatLite::TYPE_MESSAGE> {
|
420 |
+
template <typename O>
|
421 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
422 |
+
WriteTagTo(md.tag, output);
|
423 |
+
SerializeMessageTo(Get<const MessageLite*>(field),
|
424 |
+
static_cast<const SerializationTable*>(md.ptr), output);
|
425 |
+
}
|
426 |
+
};
|
427 |
+
|
428 |
+
template <>
|
429 |
+
struct SingularFieldHelper<FieldMetadata::kInlinedType> {
|
430 |
+
template <typename O>
|
431 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
432 |
+
WriteTagTo(md.tag, output);
|
433 |
+
SerializeTo<FieldMetadata::kInlinedType>(&Get<::std::string>(field), output);
|
434 |
+
}
|
435 |
+
};
|
436 |
+
|
437 |
+
template <int type>
|
438 |
+
struct RepeatedFieldHelper {
|
439 |
+
template <typename O>
|
440 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
441 |
+
typedef typename PrimitiveTypeHelper<type>::Type T;
|
442 |
+
const RepeatedField<T>& array = Get<RepeatedField<T> >(field);
|
443 |
+
for (int i = 0; i < array.size(); i++) {
|
444 |
+
WriteTagTo(md.tag, output);
|
445 |
+
SerializeTo<type>(&array[i], output);
|
446 |
+
}
|
447 |
+
}
|
448 |
+
};
|
449 |
+
|
450 |
+
// We need to use a helper class to get access to the private members
|
451 |
+
class AccessorHelper {
|
452 |
+
public:
|
453 |
+
static int Size(const RepeatedPtrFieldBase& x) { return x.size(); }
|
454 |
+
static void const* Get(const RepeatedPtrFieldBase& x, int idx) {
|
455 |
+
return x.raw_data()[idx];
|
456 |
+
}
|
457 |
+
};
|
458 |
+
|
459 |
+
template <>
|
460 |
+
struct RepeatedFieldHelper<WireFormatLite::TYPE_STRING> {
|
461 |
+
template <typename O>
|
462 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
463 |
+
const internal::RepeatedPtrFieldBase& array =
|
464 |
+
Get<internal::RepeatedPtrFieldBase>(field);
|
465 |
+
for (int i = 0; i < AccessorHelper::Size(array); i++) {
|
466 |
+
WriteTagTo(md.tag, output);
|
467 |
+
SerializeTo<WireFormatLite::TYPE_STRING>(AccessorHelper::Get(array, i),
|
468 |
+
output);
|
469 |
+
}
|
470 |
+
}
|
471 |
+
};
|
472 |
+
|
473 |
+
template <>
|
474 |
+
struct RepeatedFieldHelper<WireFormatLite::TYPE_BYTES>
|
475 |
+
: RepeatedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
476 |
+
|
477 |
+
template <>
|
478 |
+
struct RepeatedFieldHelper<WireFormatLite::TYPE_GROUP> {
|
479 |
+
template <typename O>
|
480 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
481 |
+
const internal::RepeatedPtrFieldBase& array =
|
482 |
+
Get<internal::RepeatedPtrFieldBase>(field);
|
483 |
+
for (int i = 0; i < AccessorHelper::Size(array); i++) {
|
484 |
+
WriteTagTo(md.tag, output);
|
485 |
+
SerializeGroupTo(
|
486 |
+
static_cast<const MessageLite*>(AccessorHelper::Get(array, i)),
|
487 |
+
static_cast<const SerializationTable*>(md.ptr), output);
|
488 |
+
WriteTagTo(md.tag + 1, output);
|
489 |
+
}
|
490 |
+
}
|
491 |
+
};
|
492 |
+
|
493 |
+
template <>
|
494 |
+
struct RepeatedFieldHelper<WireFormatLite::TYPE_MESSAGE> {
|
495 |
+
template <typename O>
|
496 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
497 |
+
const internal::RepeatedPtrFieldBase& array =
|
498 |
+
Get<internal::RepeatedPtrFieldBase>(field);
|
499 |
+
for (int i = 0; i < AccessorHelper::Size(array); i++) {
|
500 |
+
WriteTagTo(md.tag, output);
|
501 |
+
SerializeMessageTo(
|
502 |
+
static_cast<const MessageLite*>(AccessorHelper::Get(array, i)), md.ptr,
|
503 |
+
output);
|
504 |
+
}
|
505 |
+
}
|
506 |
+
};
|
507 |
+
|
508 |
+
|
509 |
+
template <>
|
510 |
+
struct RepeatedFieldHelper<FieldMetadata::kInlinedType>
|
511 |
+
: RepeatedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
512 |
+
|
513 |
+
template <int type>
|
514 |
+
struct PackedFieldHelper {
|
515 |
+
template <typename O>
|
516 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
517 |
+
typedef typename PrimitiveTypeHelper<type>::Type T;
|
518 |
+
const RepeatedField<T>& array = Get<RepeatedField<T> >(field);
|
519 |
+
if (array.empty()) return;
|
520 |
+
WriteTagTo(md.tag, output);
|
521 |
+
int cached_size =
|
522 |
+
Get<int>(static_cast<const uint8*>(field) + sizeof(RepeatedField<T>));
|
523 |
+
WriteLengthTo(cached_size, output);
|
524 |
+
for (int i = 0; i < array.size(); i++) {
|
525 |
+
SerializeTo<type>(&array[i], output);
|
526 |
+
}
|
527 |
+
}
|
528 |
+
};
|
529 |
+
|
530 |
+
template <>
|
531 |
+
struct PackedFieldHelper<WireFormatLite::TYPE_STRING> {
|
532 |
+
template <typename O>
|
533 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
534 |
+
GOOGLE_LOG(FATAL) << "Not implemented field number " << md.tag << " with type "
|
535 |
+
<< md.type;
|
536 |
+
}
|
537 |
+
};
|
538 |
+
|
539 |
+
template <>
|
540 |
+
struct PackedFieldHelper<WireFormatLite::TYPE_BYTES>
|
541 |
+
: PackedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
542 |
+
template <>
|
543 |
+
struct PackedFieldHelper<WireFormatLite::TYPE_GROUP>
|
544 |
+
: PackedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
545 |
+
template <>
|
546 |
+
struct PackedFieldHelper<WireFormatLite::TYPE_MESSAGE>
|
547 |
+
: PackedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
548 |
+
template <>
|
549 |
+
struct PackedFieldHelper<FieldMetadata::kInlinedType>
|
550 |
+
: PackedFieldHelper<WireFormatLite::TYPE_STRING> {};
|
551 |
+
|
552 |
+
template <int type>
|
553 |
+
struct OneOfFieldHelper {
|
554 |
+
template <typename O>
|
555 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
556 |
+
SingularFieldHelper<type>::Serialize(field, md, output);
|
557 |
+
}
|
558 |
+
};
|
559 |
+
|
560 |
+
|
561 |
+
template <>
|
562 |
+
struct OneOfFieldHelper<FieldMetadata::kInlinedType> {
|
563 |
+
template <typename O>
|
564 |
+
static void Serialize(const void* field, const FieldMetadata& md, O* output) {
|
565 |
+
SingularFieldHelper<FieldMetadata::kInlinedType>::Serialize(
|
566 |
+
Get<const ::std::string*>(field), md, output);
|
567 |
+
}
|
568 |
+
};
|
569 |
+
|
570 |
+
void SerializeNotImplemented(int field) {
|
571 |
+
GOOGLE_LOG(FATAL) << "Not implemented field number " << field;
|
572 |
+
}
|
573 |
+
|
574 |
+
// When switching to c++11 we should make these constexpr functions
|
575 |
+
#define SERIALIZE_TABLE_OP(type, type_class) \
|
576 |
+
((type - 1) + static_cast<int>(type_class) * FieldMetadata::kNumTypes)
|
577 |
+
|
578 |
+
int FieldMetadata::CalculateType(int type,
|
579 |
+
FieldMetadata::FieldTypeClass type_class) {
|
580 |
+
return SERIALIZE_TABLE_OP(type, type_class);
|
581 |
+
}
|
582 |
+
|
583 |
+
template <int type>
|
584 |
+
bool IsNull(const void* ptr) {
|
585 |
+
return *static_cast<const typename PrimitiveTypeHelper<type>::Type*>(ptr) ==
|
586 |
+
0;
|
587 |
+
}
|
588 |
+
|
589 |
+
template <>
|
590 |
+
bool IsNull<WireFormatLite::TYPE_STRING>(const void* ptr) {
|
591 |
+
return static_cast<const ArenaStringPtr*>(ptr)->Get().size() == 0;
|
592 |
+
}
|
593 |
+
|
594 |
+
template <>
|
595 |
+
bool IsNull<WireFormatLite::TYPE_BYTES>(const void* ptr) {
|
596 |
+
return static_cast<const ArenaStringPtr*>(ptr)->Get().size() == 0;
|
597 |
+
}
|
598 |
+
|
599 |
+
template <>
|
600 |
+
bool IsNull<WireFormatLite::TYPE_GROUP>(const void* ptr) {
|
601 |
+
return Get<const MessageLite*>(ptr) == NULL;
|
602 |
+
}
|
603 |
+
|
604 |
+
template <>
|
605 |
+
bool IsNull<WireFormatLite::TYPE_MESSAGE>(const void* ptr) {
|
606 |
+
return Get<const MessageLite*>(ptr) == NULL;
|
607 |
+
}
|
608 |
+
|
609 |
+
|
610 |
+
template <>
|
611 |
+
bool IsNull<FieldMetadata::kInlinedType>(const void* ptr) {
|
612 |
+
return static_cast<const ::std::string*>(ptr)->empty();
|
613 |
+
}
|
614 |
+
|
615 |
+
#define SERIALIZERS_FOR_TYPE(type) \
|
616 |
+
case SERIALIZE_TABLE_OP(type, FieldMetadata::kPresence): \
|
617 |
+
if (!IsPresent(base, field_metadata.has_offset)) continue; \
|
618 |
+
SingularFieldHelper<type>::Serialize(ptr, field_metadata, output); \
|
619 |
+
break; \
|
620 |
+
case SERIALIZE_TABLE_OP(type, FieldMetadata::kNoPresence): \
|
621 |
+
if (IsNull<type>(ptr)) continue; \
|
622 |
+
SingularFieldHelper<type>::Serialize(ptr, field_metadata, output); \
|
623 |
+
break; \
|
624 |
+
case SERIALIZE_TABLE_OP(type, FieldMetadata::kRepeated): \
|
625 |
+
RepeatedFieldHelper<type>::Serialize(ptr, field_metadata, output); \
|
626 |
+
break; \
|
627 |
+
case SERIALIZE_TABLE_OP(type, FieldMetadata::kPacked): \
|
628 |
+
PackedFieldHelper<type>::Serialize(ptr, field_metadata, output); \
|
629 |
+
break; \
|
630 |
+
case SERIALIZE_TABLE_OP(type, FieldMetadata::kOneOf): \
|
631 |
+
if (!IsOneofPresent(base, field_metadata.has_offset, field_metadata.tag)) \
|
632 |
+
continue; \
|
633 |
+
OneOfFieldHelper<type>::Serialize(ptr, field_metadata, output); \
|
634 |
+
break
|
635 |
+
|
636 |
+
void SerializeInternal(const uint8* base,
|
637 |
+
const FieldMetadata* field_metadata_table,
|
638 |
+
int32 num_fields,
|
639 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
640 |
+
for (int i = 0; i < num_fields; i++) {
|
641 |
+
const FieldMetadata& field_metadata = field_metadata_table[i];
|
642 |
+
const uint8* ptr = base + field_metadata.offset;
|
643 |
+
switch (field_metadata.type) {
|
644 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_DOUBLE);
|
645 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FLOAT);
|
646 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT64);
|
647 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT64);
|
648 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT32);
|
649 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED64);
|
650 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED32);
|
651 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BOOL);
|
652 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_STRING);
|
653 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_GROUP);
|
654 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_MESSAGE);
|
655 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BYTES);
|
656 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT32);
|
657 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_ENUM);
|
658 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED32);
|
659 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED64);
|
660 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT32);
|
661 |
+
SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT64);
|
662 |
+
SERIALIZERS_FOR_TYPE(FieldMetadata::kInlinedType);
|
663 |
+
|
664 |
+
// Special cases
|
665 |
+
case FieldMetadata::kSpecial:
|
666 |
+
reinterpret_cast<SpecialSerializer>(
|
667 |
+
const_cast<void*>(field_metadata.ptr))(
|
668 |
+
base, field_metadata.offset, field_metadata.tag,
|
669 |
+
field_metadata.has_offset, output);
|
670 |
+
break;
|
671 |
+
default:
|
672 |
+
// __builtin_unreachable()
|
673 |
+
SerializeNotImplemented(field_metadata.type);
|
674 |
+
}
|
675 |
+
}
|
676 |
+
}
|
677 |
+
|
678 |
+
// Table-driven serialization of a message (at |base|) directly into a byte
// array. Returns one past the last byte written.
uint8* SerializeInternalToArray(const uint8* base,
                                const FieldMetadata* field_metadata_table,
                                int32 num_fields, bool is_deterministic,
                                uint8* buffer) {
  ArrayOutput array_output = {buffer, is_deterministic};
  ArrayOutput* output = &array_output;
  for (int i = 0; i < num_fields; i++) {
    const FieldMetadata& field_metadata = field_metadata_table[i];
    const uint8* ptr = base + field_metadata.offset;
    switch (field_metadata.type) {
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_DOUBLE);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FLOAT);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT64);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT64);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_INT32);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED64);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_FIXED32);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BOOL);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_STRING);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_GROUP);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_MESSAGE);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_BYTES);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_UINT32);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_ENUM);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED32);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SFIXED64);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT32);
      SERIALIZERS_FOR_TYPE(WireFormatLite::TYPE_SINT64);
      SERIALIZERS_FOR_TYPE(FieldMetadata::kInlinedType);
      // Special cases: these serializers only take a coded stream, so wrap
      // the array in a temporary stream and advance by the bytes it wrote.
      case FieldMetadata::kSpecial: {
        io::ArrayOutputStream array_stream(array_output.ptr, INT_MAX);
        io::CodedOutputStream output(&array_stream);
        output.SetSerializationDeterministic(is_deterministic);
        reinterpret_cast<SpecialSerializer>(
            const_cast<void*>(field_metadata.ptr))(
            base, field_metadata.offset, field_metadata.tag,
            field_metadata.has_offset, &output);
        array_output.ptr += output.ByteCount();
      } break;
      default:
        // __builtin_unreachable()
        SerializeNotImplemented(field_metadata.type);
    }
  }
  return array_output.ptr;
}
#undef SERIALIZERS_FOR_TYPE
|
726 |
+
|
727 |
+
void ExtensionSerializer(const uint8* ptr, uint32 offset, uint32 tag,
|
728 |
+
uint32 has_offset,
|
729 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
730 |
+
reinterpret_cast<const ExtensionSet*>(ptr + offset)
|
731 |
+
->SerializeWithCachedSizes(tag, has_offset, output);
|
732 |
+
}
|
733 |
+
|
734 |
+
void UnknownFieldSerializerLite(const uint8* ptr, uint32 offset, uint32 tag,
|
735 |
+
uint32 has_offset,
|
736 |
+
::google::protobuf::io::CodedOutputStream* output) {
|
737 |
+
output->WriteString(
|
738 |
+
reinterpret_cast<const InternalMetadataWithArenaLite*>(ptr + offset)
|
739 |
+
->unknown_fields());
|
740 |
+
}
|
741 |
+
|
742 |
+
// Deep-copies |message| when non-NULL (caller owns the copy); returns NULL
// otherwise.
MessageLite* DuplicateIfNonNullInternal(MessageLite* message) {
  if (message == NULL) {
    return NULL;
  }
  MessageLite* ret = message->New();
  ret->CheckTypeAndMergeFrom(*message);
  return ret;
}
|
751 |
+
|
752 |
+
// Returns a message owned by this Arena. This may require Own()ing or
|
753 |
+
// duplicating the message.
|
754 |
+
// Returns a message owned by |message_arena|. A heap-allocated submessage
// moving onto an arena can simply be Own()ed; in every other combination a
// copy must be made on the target arena (or heap).
MessageLite* GetOwnedMessageInternal(Arena* message_arena,
                                     MessageLite* submessage,
                                     Arena* submessage_arena) {
  GOOGLE_DCHECK(submessage->GetArena() == submessage_arena);
  GOOGLE_DCHECK(message_arena != submessage_arena);
  if (message_arena != NULL && submessage_arena == NULL) {
    message_arena->Own(submessage);
    return submessage;
  }
  MessageLite* ret = submessage->New(message_arena);
  ret->CheckTypeAndMergeFrom(*submessage);
  return ret;
}
|
768 |
+
|
769 |
+
namespace {
|
770 |
+
|
771 |
+
void InitSCC_DFS(SCCInfoBase* scc) {
|
772 |
+
if (scc->visit_status.load(std::memory_order_relaxed) !=
|
773 |
+
SCCInfoBase::kUninitialized) return;
|
774 |
+
scc->visit_status.store(SCCInfoBase::kRunning, std::memory_order_relaxed);
|
775 |
+
// Each base is followed by an array of pointers to deps
|
776 |
+
auto deps = reinterpret_cast<SCCInfoBase* const*>(scc + 1);
|
777 |
+
for (int i = 0; i < scc->num_deps; i++) {
|
778 |
+
if (deps[i]) InitSCC_DFS(deps[i]);
|
779 |
+
}
|
780 |
+
scc->init_func();
|
781 |
+
// Mark done (note we use memory order release here), other threads could
|
782 |
+
// now see this as initialized and thus the initialization must have happened
|
783 |
+
// before.
|
784 |
+
scc->visit_status.store(SCCInfoBase::kInitialized, std::memory_order_release);
|
785 |
+
}
|
786 |
+
|
787 |
+
} // namespace
|
788 |
+
|
789 |
+
// Initializes an SCC of default instances under a global mutex, guarding
// against re-entrancy when a default-instance constructor calls InitSCC.
void InitSCCImpl(SCCInfoBase* scc) {
  static WrappedMutex mu{GOOGLE_PROTOBUF_LINKER_INITIALIZED};
  // Holds the default-constructed id when no initialization is running, or
  // the id of the thread that is currently initializing.
  static std::atomic<std::thread::id> runner;
  auto me = std::this_thread::get_id();
  // Re-entrant call: a default-instance constructor invoked InitSCC while we
  // were already exploring this SCC on this thread.
  if (runner.load(std::memory_order_relaxed) == me) {
    GOOGLE_CHECK_EQ(scc->visit_status.load(std::memory_order_relaxed),
                    SCCInfoBase::kRunning);
    return;
  }
  InitProtobufDefaults();
  mu.Lock();
  runner.store(me, std::memory_order_relaxed);
  InitSCC_DFS(scc);
  runner.store(std::thread::id{}, std::memory_order_relaxed);
  mu.Unlock();
}
|
811 |
+
|
812 |
+
} // namespace internal
|
813 |
+
} // namespace protobuf
|
814 |
+
} // namespace google
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena.h
ADDED
@@ -0,0 +1,703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// This file defines an Arena allocator for better allocation performance.
|
32 |
+
|
33 |
+
#ifndef GOOGLE_PROTOBUF_ARENA_H__
|
34 |
+
#define GOOGLE_PROTOBUF_ARENA_H__
|
35 |
+
|
36 |
+
#include <limits>
|
37 |
+
#ifdef max
|
38 |
+
#undef max // Visual Studio defines this macro
|
39 |
+
#endif
|
40 |
+
#if defined(_MSC_VER) && !defined(_LIBCPP_STD_VER) && !_HAS_EXCEPTIONS
|
41 |
+
// Work around bugs in MSVC <typeinfo> header when _HAS_EXCEPTIONS=0.
|
42 |
+
#include <exception>
|
43 |
+
#include <typeinfo>
|
44 |
+
namespace std {
|
45 |
+
using type_info = ::type_info;
|
46 |
+
}
|
47 |
+
#else
|
48 |
+
#include <typeinfo>
|
49 |
+
#endif
|
50 |
+
|
51 |
+
#include <google/protobuf/arena_impl.h>
|
52 |
+
#include <google/protobuf/stubs/port.h>
|
53 |
+
#include <type_traits>
|
54 |
+
|
55 |
+
namespace google {
|
56 |
+
namespace protobuf {
|
57 |
+
|
58 |
+
struct ArenaOptions; // defined below
|
59 |
+
|
60 |
+
} // namespace protobuf
|
61 |
+
|
62 |
+
namespace quality_webanswers {
|
63 |
+
|
64 |
+
void TempPrivateWorkAround(::google::protobuf::ArenaOptions* arena_options);
|
65 |
+
|
66 |
+
} // namespace quality_webanswers
|
67 |
+
|
68 |
+
namespace protobuf {
|
69 |
+
|
70 |
+
class Arena; // defined below
|
71 |
+
class Message; // defined in message.h
|
72 |
+
class MessageLite;
|
73 |
+
|
74 |
+
namespace arena_metrics {
|
75 |
+
|
76 |
+
void EnableArenaMetrics(::google::protobuf::ArenaOptions* options);
|
77 |
+
|
78 |
+
} // namespace arena_metrics
|
79 |
+
|
80 |
+
namespace internal {
|
81 |
+
|
82 |
+
struct ArenaStringPtr; // defined in arenastring.h
|
83 |
+
class LazyField; // defined in lazy_field.h
|
84 |
+
|
85 |
+
template <typename Type>
|
86 |
+
class GenericTypeHandler; // defined in repeated_field.h
|
87 |
+
|
88 |
+
// Templated cleanup methods.
|
89 |
+
template <typename T>
|
90 |
+
void arena_destruct_object(void* object) {
|
91 |
+
reinterpret_cast<T*>(object)->~T();
|
92 |
+
}
|
93 |
+
template <typename T>
|
94 |
+
void arena_delete_object(void* object) {
|
95 |
+
delete reinterpret_cast<T*>(object);
|
96 |
+
}
|
97 |
+
inline void arena_free(void* object, size_t size) {
|
98 |
+
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
|
99 |
+
::operator delete(object, size);
|
100 |
+
#else
|
101 |
+
(void)size;
|
102 |
+
::operator delete(object);
|
103 |
+
#endif
|
104 |
+
}
|
105 |
+
|
106 |
+
} // namespace internal
|
107 |
+
|
108 |
+
// ArenaOptions provides optional additional parameters to arena construction
|
109 |
+
// that control its block-allocation behavior.
|
110 |
+
struct ArenaOptions {
|
111 |
+
// This defines the size of the first block requested from the system malloc.
|
112 |
+
// Subsequent block sizes will increase in a geometric series up to a maximum.
|
113 |
+
size_t start_block_size;
|
114 |
+
|
115 |
+
// This defines the maximum block size requested from system malloc (unless an
|
116 |
+
// individual arena allocation request occurs with a size larger than this
|
117 |
+
// maximum). Requested block sizes increase up to this value, then remain
|
118 |
+
// here.
|
119 |
+
size_t max_block_size;
|
120 |
+
|
121 |
+
// An initial block of memory for the arena to use, or NULL for none. If
|
122 |
+
// provided, the block must live at least as long as the arena itself. The
|
123 |
+
// creator of the Arena retains ownership of the block after the Arena is
|
124 |
+
// destroyed.
|
125 |
+
char* initial_block;
|
126 |
+
|
127 |
+
// The size of the initial block, if provided.
|
128 |
+
size_t initial_block_size;
|
129 |
+
|
130 |
+
// A function pointer to an alloc method that returns memory blocks of size
|
131 |
+
// requested. By default, it contains a ptr to the malloc function.
|
132 |
+
//
|
133 |
+
// NOTE: block_alloc and dealloc functions are expected to behave like
|
134 |
+
// malloc and free, including Asan poisoning.
|
135 |
+
void* (*block_alloc)(size_t);
|
136 |
+
// A function pointer to a dealloc method that takes ownership of the blocks
|
137 |
+
// from the arena. By default, it contains a ptr to a wrapper function that
|
138 |
+
// calls free.
|
139 |
+
void (*block_dealloc)(void*, size_t);
|
140 |
+
|
141 |
+
ArenaOptions()
|
142 |
+
: start_block_size(kDefaultStartBlockSize),
|
143 |
+
max_block_size(kDefaultMaxBlockSize),
|
144 |
+
initial_block(NULL),
|
145 |
+
initial_block_size(0),
|
146 |
+
block_alloc(&::operator new),
|
147 |
+
block_dealloc(&internal::arena_free),
|
148 |
+
on_arena_init(NULL),
|
149 |
+
on_arena_reset(NULL),
|
150 |
+
on_arena_destruction(NULL),
|
151 |
+
on_arena_allocation(NULL) {}
|
152 |
+
|
153 |
+
private:
|
154 |
+
// Hooks for adding external functionality such as user-specific metrics
|
155 |
+
// collection, specific debugging abilities, etc.
|
156 |
+
// Init hook may return a pointer to a cookie to be stored in the arena.
|
157 |
+
// reset and destruction hooks will then be called with the same cookie
|
158 |
+
// pointer. This allows us to save an external object per arena instance and
|
159 |
+
// use it on the other hooks (Note: It is just as legal for init to return
|
160 |
+
// NULL and not use the cookie feature).
|
161 |
+
// on_arena_reset and on_arena_destruction also receive the space used in
|
162 |
+
// the arena just before the reset.
|
163 |
+
void* (*on_arena_init)(Arena* arena);
|
164 |
+
void (*on_arena_reset)(Arena* arena, void* cookie, uint64 space_used);
|
165 |
+
void (*on_arena_destruction)(Arena* arena, void* cookie, uint64 space_used);
|
166 |
+
|
167 |
+
// type_info is promised to be static - its lifetime extends to
|
168 |
+
// match program's lifetime (It is given by typeid operator).
|
169 |
+
// Note: typeid(void) will be passed as allocated_type every time we
|
170 |
+
// intentionally want to avoid monitoring an allocation. (i.e. internal
|
171 |
+
// allocations for managing the arena)
|
172 |
+
void (*on_arena_allocation)(const std::type_info* allocated_type,
|
173 |
+
uint64 alloc_size, void* cookie);
|
174 |
+
|
175 |
+
// Constants define default starting block size and max block size for
|
176 |
+
// arena allocator behavior -- see descriptions above.
|
177 |
+
static const size_t kDefaultStartBlockSize = 256;
|
178 |
+
static const size_t kDefaultMaxBlockSize = 8192;
|
179 |
+
|
180 |
+
friend void ::google::protobuf::arena_metrics::EnableArenaMetrics(ArenaOptions*);
|
181 |
+
friend void quality_webanswers::TempPrivateWorkAround(ArenaOptions*);
|
182 |
+
friend class Arena;
|
183 |
+
friend class ArenaOptionsTestFriend;
|
184 |
+
};
|
185 |
+
|
186 |
+
// Support for non-RTTI environments. (The metrics hooks API uses type
|
187 |
+
// information.)
|
188 |
+
#ifndef GOOGLE_PROTOBUF_NO_RTTI
|
189 |
+
#define RTTI_TYPE_ID(type) (&typeid(type))
|
190 |
+
#else
|
191 |
+
#define RTTI_TYPE_ID(type) (NULL)
|
192 |
+
#endif
|
193 |
+
|
194 |
+
// Arena allocator. Arena allocation replaces ordinary (heap-based) allocation
|
195 |
+
// with new/delete, and improves performance by aggregating allocations into
|
196 |
+
// larger blocks and freeing allocations all at once. Protocol messages are
|
197 |
+
// allocated on an arena by using Arena::CreateMessage<T>(Arena*), below, and
|
198 |
+
// are automatically freed when the arena is destroyed.
|
199 |
+
//
|
200 |
+
// This is a thread-safe implementation: multiple threads may allocate from the
|
201 |
+
// arena concurrently. Destruction is not thread-safe and the destructing
|
202 |
+
// thread must synchronize with users of the arena first.
|
203 |
+
//
|
204 |
+
// An arena provides two allocation interfaces: CreateMessage<T>, which works
|
205 |
+
// for arena-enabled proto2 message types as well as other types that satisfy
|
206 |
+
// the appropriate protocol (described below), and Create<T>, which works for
|
207 |
+
// any arbitrary type T. CreateMessage<T> is better when the type T supports it,
|
208 |
+
// because this interface (i) passes the arena pointer to the created object so
|
209 |
+
// that its sub-objects and internal allocations can use the arena too, and (ii)
|
210 |
+
// elides the object's destructor call when possible. Create<T> does not place
|
211 |
+
// any special requirements on the type T, and will invoke the object's
|
212 |
+
// destructor when the arena is destroyed.
|
213 |
+
//
|
214 |
+
// The arena message allocation protocol, required by CreateMessage<T>, is as
|
215 |
+
// follows:
|
216 |
+
//
|
217 |
+
// - The type T must have (at least) two constructors: a constructor with no
|
218 |
+
// arguments, called when a T is allocated on the heap; and a constructor with
|
219 |
+
// a google::protobuf::Arena* argument, called when a T is allocated on an arena. If the
|
220 |
+
// second constructor is called with a NULL arena pointer, it must be
|
221 |
+
// equivalent to invoking the first (no-argument) constructor.
|
222 |
+
//
|
223 |
+
// - The type T must have a particular type trait: a nested type
|
224 |
+
// |InternalArenaConstructable_|. This is usually a typedef to |void|. If no
|
225 |
+
// such type trait exists, then the instantiation CreateMessage<T> will fail
|
226 |
+
// to compile.
|
227 |
+
//
|
228 |
+
// - The type T *may* have the type trait |DestructorSkippable_|. If this type
|
229 |
+
// trait is present in the type, then its destructor will not be called if and
|
230 |
+
// only if it was passed a non-NULL arena pointer. If this type trait is not
|
231 |
+
// present on the type, then its destructor is always called when the
|
232 |
+
// containing arena is destroyed.
|
233 |
+
//
|
234 |
+
// - One- and two-user-argument forms of CreateMessage<T>() also exist that
|
235 |
+
// forward these constructor arguments to T's constructor: for example,
|
236 |
+
// CreateMessage<T>(Arena*, arg1, arg2) forwards to a constructor T(Arena*,
|
237 |
+
// arg1, arg2).
|
238 |
+
//
|
239 |
+
// This protocol is implemented by all arena-enabled proto2 message classes as
|
240 |
+
// well as RepeatedPtrField.
|
241 |
+
//
|
242 |
+
// Do NOT subclass Arena. This class will be marked as final when C++11 is
|
243 |
+
// enabled.
|
244 |
+
class LIBPROTOBUF_EXPORT Arena {
|
245 |
+
public:
|
246 |
+
// Arena constructor taking custom options. See ArenaOptions below for
|
247 |
+
// descriptions of the options available.
|
248 |
+
explicit Arena(const ArenaOptions& options) : impl_(options) {
|
249 |
+
Init(options);
|
250 |
+
}
|
251 |
+
|
252 |
+
// Block overhead. Use this as a guide for how much to over-allocate the
|
253 |
+
// initial block if you want an allocation of size N to fit inside it.
|
254 |
+
//
|
255 |
+
// WARNING: if you allocate multiple objects, it is difficult to guarantee
|
256 |
+
// that a series of allocations will fit in the initial block, especially if
|
257 |
+
// Arena changes its alignment guarantees in the future!
|
258 |
+
static const size_t kBlockOverhead = internal::ArenaImpl::kBlockHeaderSize +
|
259 |
+
internal::ArenaImpl::kSerialArenaSize;
|
260 |
+
|
261 |
+
// Default constructor with sensible default options, tuned for average
|
262 |
+
// use-cases.
|
263 |
+
Arena() : impl_(ArenaOptions()) { Init(ArenaOptions()); }
|
264 |
+
|
265 |
+
~Arena() {
|
266 |
+
if (hooks_cookie_) {
|
267 |
+
CallDestructorHooks();
|
268 |
+
}
|
269 |
+
}
|
270 |
+
|
271 |
+
void Init(const ArenaOptions& options) {
|
272 |
+
on_arena_allocation_ = options.on_arena_allocation;
|
273 |
+
on_arena_reset_ = options.on_arena_reset;
|
274 |
+
on_arena_destruction_ = options.on_arena_destruction;
|
275 |
+
// Call the initialization hook
|
276 |
+
if (options.on_arena_init != NULL) {
|
277 |
+
hooks_cookie_ = options.on_arena_init(this);
|
278 |
+
} else {
|
279 |
+
hooks_cookie_ = NULL;
|
280 |
+
}
|
281 |
+
}
|
282 |
+
|
283 |
+
// API to create proto2 message objects on the arena. If the arena passed in
|
284 |
+
// is NULL, then a heap allocated object is returned. Type T must be a message
|
285 |
+
// defined in a .proto file with cc_enable_arenas set to true, otherwise a
|
286 |
+
// compilation error will occur.
|
287 |
+
//
|
288 |
+
// RepeatedField and RepeatedPtrField may also be instantiated directly on an
|
289 |
+
// arena with this method.
|
290 |
+
//
|
291 |
+
// This function also accepts any type T that satisfies the arena message
|
292 |
+
// allocation protocol, documented above.
|
293 |
+
template <typename T, typename... Args>
|
294 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessage(
|
295 |
+
Arena* arena, Args&&... args) {
|
296 |
+
static_assert(
|
297 |
+
InternalHelper<T>::is_arena_constructable::value,
|
298 |
+
"CreateMessage can only construct types that are ArenaConstructable");
|
299 |
+
// We must delegate to CreateMaybeMessage() and NOT CreateMessageInternal()
|
300 |
+
// because protobuf generated classes specialize CreateMaybeMessage() and we
|
301 |
+
// need to use that specialization for code size reasons.
|
302 |
+
return Arena::CreateMaybeMessage<T>(arena, std::forward<Args>(args)...);
|
303 |
+
}
|
304 |
+
|
305 |
+
// API to create any objects on the arena. Note that only the object will
|
306 |
+
// be created on the arena; the underlying ptrs (in case of a proto2 message)
|
307 |
+
// will be still heap allocated. Proto messages should usually be allocated
|
308 |
+
// with CreateMessage<T>() instead.
|
309 |
+
//
|
310 |
+
// Note that even if T satisfies the arena message construction protocol
|
311 |
+
// (InternalArenaConstructable_ trait and optional DestructorSkippable_
|
312 |
+
// trait), as described above, this function does not follow the protocol;
|
313 |
+
// instead, it treats T as a black-box type, just as if it did not have these
|
314 |
+
// traits. Specifically, T's constructor arguments will always be only those
|
315 |
+
// passed to Create<T>() -- no additional arena pointer is implicitly added.
|
316 |
+
// Furthermore, the destructor will always be called at arena destruction time
|
317 |
+
// (unless the destructor is trivial). Hence, from T's point of view, it is as
|
318 |
+
// if the object were allocated on the heap (except that the underlying memory
|
319 |
+
// is obtained from the arena).
|
320 |
+
template <typename T, typename... Args>
|
321 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* Create(Arena* arena,
|
322 |
+
Args&&... args) {
|
323 |
+
return CreateNoMessage<T>(arena, is_arena_constructable<T>(),
|
324 |
+
std::forward<Args>(args)...);
|
325 |
+
}
|
326 |
+
|
327 |
+
// Create an array of object type T on the arena *without* invoking the
|
328 |
+
// constructor of T. If `arena` is null, then the return value should be freed
|
329 |
+
// with `delete[] x;` (or `::operator delete[](x);`).
|
330 |
+
// To ensure safe uses, this function checks at compile time
|
331 |
+
// (when compiled as C++11) that T is trivially default-constructible and
|
332 |
+
// trivially destructible.
|
333 |
+
template <typename T>
|
334 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateArray(
|
335 |
+
Arena* arena, size_t num_elements) {
|
336 |
+
static_assert(std::is_pod<T>::value,
|
337 |
+
"CreateArray requires a trivially constructible type");
|
338 |
+
static_assert(std::is_trivially_destructible<T>::value,
|
339 |
+
"CreateArray requires a trivially destructible type");
|
340 |
+
GOOGLE_CHECK_LE(num_elements, std::numeric_limits<size_t>::max() / sizeof(T))
|
341 |
+
<< "Requested size is too large to fit into size_t.";
|
342 |
+
if (arena == NULL) {
|
343 |
+
return static_cast<T*>(::operator new[](num_elements * sizeof(T)));
|
344 |
+
} else {
|
345 |
+
return arena->CreateInternalRawArray<T>(num_elements);
|
346 |
+
}
|
347 |
+
}
|
348 |
+
|
349 |
+
// Returns the total space allocated by the arena, which is the sum of the
|
350 |
+
// sizes of the underlying blocks. This method is relatively fast; a counter
|
351 |
+
// is kept as blocks are allocated.
|
352 |
+
uint64 SpaceAllocated() const { return impl_.SpaceAllocated(); }
|
353 |
+
// Returns the total space used by the arena. Similar to SpaceAllocated but
|
354 |
+
// does not include free space and block overhead. The total space returned
|
355 |
+
// may not include space used by other threads executing concurrently with
|
356 |
+
// the call to this method.
|
357 |
+
uint64 SpaceUsed() const { return impl_.SpaceUsed(); }
|
358 |
+
// DEPRECATED. Please use SpaceAllocated() and SpaceUsed().
|
359 |
+
//
|
360 |
+
// Combines SpaceAllocated and SpaceUsed. Returns a pair of
|
361 |
+
// <space_allocated, space_used>.
|
362 |
+
PROTOBUF_RUNTIME_DEPRECATED("Please use SpaceAllocated() and SpaceUsed()")
|
363 |
+
std::pair<uint64, uint64> SpaceAllocatedAndUsed() const {
|
364 |
+
return std::make_pair(SpaceAllocated(), SpaceUsed());
|
365 |
+
}
|
366 |
+
|
367 |
+
// Frees all storage allocated by this arena after calling destructors
|
368 |
+
// registered with OwnDestructor() and freeing objects registered with Own().
|
369 |
+
// Any objects allocated on this arena are unusable after this call. It also
|
370 |
+
// returns the total space used by the arena which is the sums of the sizes
|
371 |
+
// of the allocated blocks. This method is not thread-safe.
|
372 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE uint64 Reset() {
|
373 |
+
// Call the reset hook
|
374 |
+
if (on_arena_reset_ != NULL) {
|
375 |
+
on_arena_reset_(this, hooks_cookie_, impl_.SpaceAllocated());
|
376 |
+
}
|
377 |
+
return impl_.Reset();
|
378 |
+
}
|
379 |
+
|
380 |
+
// Adds |object| to a list of heap-allocated objects to be freed with |delete|
|
381 |
+
// when the arena is destroyed or reset.
|
382 |
+
template <typename T>
|
383 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void Own(T* object) {
|
384 |
+
OwnInternal(object, std::is_convertible<T*, Message*>());
|
385 |
+
}
|
386 |
+
|
387 |
+
// Adds |object| to a list of objects whose destructors will be manually
|
388 |
+
// called when the arena is destroyed or reset. This differs from Own() in
|
389 |
+
// that it does not free the underlying memory with |delete|; hence, it is
|
390 |
+
// normally only used for objects that are placement-newed into
|
391 |
+
// arena-allocated memory.
|
392 |
+
template <typename T>
|
393 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void OwnDestructor(T* object) {
|
394 |
+
if (object != NULL) {
|
395 |
+
impl_.AddCleanup(object, &internal::arena_destruct_object<T>);
|
396 |
+
}
|
397 |
+
}
|
398 |
+
|
399 |
+
// Adds a custom member function on an object to the list of destructors that
|
400 |
+
// will be manually called when the arena is destroyed or reset. This differs
|
401 |
+
// from OwnDestructor() in that any member function may be specified, not only
|
402 |
+
// the class destructor.
|
403 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE void OwnCustomDestructor(
|
404 |
+
void* object, void (*destruct)(void*)) {
|
405 |
+
impl_.AddCleanup(object, destruct);
|
406 |
+
}
|
407 |
+
|
408 |
+
// Retrieves the arena associated with |value| if |value| is an arena-capable
|
409 |
+
// message, or NULL otherwise. This differs from value->GetArena() in that the
|
410 |
+
// latter is a virtual call, while this method is a templated call that
|
411 |
+
// resolves at compile-time.
|
412 |
+
template <typename T>
|
413 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArena(
|
414 |
+
const T* value) {
|
415 |
+
return GetArenaInternal(value, is_arena_constructable<T>());
|
416 |
+
}
|
417 |
+
|
418 |
+
template <typename T>
|
419 |
+
class InternalHelper {
|
420 |
+
template <typename U>
|
421 |
+
static char DestructorSkippable(const typename U::DestructorSkippable_*);
|
422 |
+
template <typename U>
|
423 |
+
static double DestructorSkippable(...);
|
424 |
+
|
425 |
+
typedef std::integral_constant<
|
426 |
+
bool, sizeof(DestructorSkippable<T>(static_cast<const T*>(0))) ==
|
427 |
+
sizeof(char) ||
|
428 |
+
std::is_trivially_destructible<T>::value>
|
429 |
+
is_destructor_skippable;
|
430 |
+
|
431 |
+
template <typename U>
|
432 |
+
static char ArenaConstructable(
|
433 |
+
const typename U::InternalArenaConstructable_*);
|
434 |
+
template <typename U>
|
435 |
+
static double ArenaConstructable(...);
|
436 |
+
|
437 |
+
typedef std::integral_constant<bool, sizeof(ArenaConstructable<T>(
|
438 |
+
static_cast<const T*>(0))) ==
|
439 |
+
sizeof(char)>
|
440 |
+
is_arena_constructable;
|
441 |
+
|
442 |
+
template <typename... Args>
|
443 |
+
static T* Construct(void* ptr, Args&&... args) {
|
444 |
+
return new (ptr) T(std::forward<Args>(args)...);
|
445 |
+
}
|
446 |
+
|
447 |
+
static Arena* GetArena(const T* p) { return p->GetArenaNoVirtual(); }
|
448 |
+
|
449 |
+
friend class Arena;
|
450 |
+
};
|
451 |
+
|
452 |
+
// Helper typetraits that indicates support for arenas in a type T at compile
|
453 |
+
// time. This is public only to allow construction of higher-level templated
|
454 |
+
// utilities.
|
455 |
+
//
|
456 |
+
// is_arena_constructable<T>::value is true if the message type T has arena
|
457 |
+
// support enabled, and false otherwise.
|
458 |
+
//
|
459 |
+
// is_destructor_skippable<T>::value is true if the message type T has told
|
460 |
+
// the arena that it is safe to skip the destructor, and false otherwise.
|
461 |
+
//
|
462 |
+
// This is inside Arena because only Arena has the friend relationships
|
463 |
+
// necessary to see the underlying generated code traits.
|
464 |
+
template <typename T>
|
465 |
+
struct is_arena_constructable : InternalHelper<T>::is_arena_constructable {};
|
466 |
+
template <typename T>
|
467 |
+
struct is_destructor_skippable : InternalHelper<T>::is_destructor_skippable {
|
468 |
+
};
|
469 |
+
|
470 |
+
private:
|
471 |
+
template <typename T, typename... Args>
|
472 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessageInternal(
|
473 |
+
Arena* arena, Args&&... args) {
|
474 |
+
static_assert(
|
475 |
+
InternalHelper<T>::is_arena_constructable::value,
|
476 |
+
"CreateMessage can only construct types that are ArenaConstructable");
|
477 |
+
if (arena == NULL) {
|
478 |
+
return new T(nullptr, std::forward<Args>(args)...);
|
479 |
+
} else {
|
480 |
+
return arena->DoCreateMessage<T>(std::forward<Args>(args)...);
|
481 |
+
}
|
482 |
+
}
|
483 |
+
|
484 |
+
// This specialization for no arguments is necessary, because its behavior is
|
485 |
+
// slightly different. When the arena pointer is nullptr, it calls T()
|
486 |
+
// instead of T(nullptr).
|
487 |
+
template <typename T>
|
488 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMessageInternal(
|
489 |
+
Arena* arena) {
|
490 |
+
static_assert(
|
491 |
+
InternalHelper<T>::is_arena_constructable::value,
|
492 |
+
"CreateMessage can only construct types that are ArenaConstructable");
|
493 |
+
if (arena == NULL) {
|
494 |
+
return new T();
|
495 |
+
} else {
|
496 |
+
return arena->DoCreateMessage<T>();
|
497 |
+
}
|
498 |
+
}
|
499 |
+
|
500 |
+
template <typename T, typename... Args>
|
501 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateInternal(
|
502 |
+
Arena* arena, Args&&... args) {
|
503 |
+
if (arena == NULL) {
|
504 |
+
return new T(std::forward<Args>(args)...);
|
505 |
+
} else {
|
506 |
+
return arena->DoCreate<T>(std::is_trivially_destructible<T>::value,
|
507 |
+
std::forward<Args>(args)...);
|
508 |
+
}
|
509 |
+
}
|
510 |
+
|
511 |
+
void CallDestructorHooks();
|
512 |
+
void OnArenaAllocation(const std::type_info* allocated_type, size_t n) const;
|
513 |
+
inline void AllocHook(const std::type_info* allocated_type, size_t n) const {
|
514 |
+
if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL)) {
|
515 |
+
OnArenaAllocation(allocated_type, n);
|
516 |
+
}
|
517 |
+
}
|
518 |
+
|
519 |
+
// Allocate and also optionally call on_arena_allocation callback with the
|
520 |
+
// allocated type info when the hooks are in place in ArenaOptions and
|
521 |
+
// the cookie is not null.
|
522 |
+
template <typename T>
|
523 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void* AllocateInternal(
|
524 |
+
bool skip_explicit_ownership) {
|
525 |
+
const size_t n = internal::AlignUpTo8(sizeof(T));
|
526 |
+
AllocHook(RTTI_TYPE_ID(T), n);
|
527 |
+
// Monitor allocation if needed.
|
528 |
+
if (skip_explicit_ownership) {
|
529 |
+
return impl_.AllocateAligned(n);
|
530 |
+
} else {
|
531 |
+
return impl_.AllocateAlignedAndAddCleanup(
|
532 |
+
n, &internal::arena_destruct_object<T>);
|
533 |
+
}
|
534 |
+
}
|
535 |
+
|
536 |
+
// CreateMessage<T> requires that T supports arenas, but this private method
|
537 |
+
// works whether or not T supports arenas. These are not exposed to user code
|
538 |
+
// as it can cause confusing API usages, and end up having double free in
|
539 |
+
// user code. These are used only internally from LazyField and Repeated
|
540 |
+
// fields, since they are designed to work in all mode combinations.
|
541 |
+
template <typename Msg, typename... Args>
|
542 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Msg* DoCreateMaybeMessage(
|
543 |
+
Arena* arena, std::true_type, Args&&... args) {
|
544 |
+
return CreateMessageInternal<Msg>(arena, std::forward<Args>(args)...);
|
545 |
+
}
|
546 |
+
|
547 |
+
template <typename T, typename... Args>
|
548 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* DoCreateMaybeMessage(
|
549 |
+
Arena* arena, std::false_type, Args&&... args) {
|
550 |
+
return CreateInternal<T>(arena, std::forward<Args>(args)...);
|
551 |
+
}
|
552 |
+
|
553 |
+
template <typename T, typename... Args>
|
554 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateMaybeMessage(
|
555 |
+
Arena* arena, Args&&... args) {
|
556 |
+
return DoCreateMaybeMessage<T>(arena, is_arena_constructable<T>(),
|
557 |
+
std::forward<Args>(args)...);
|
558 |
+
}
|
559 |
+
|
560 |
+
template <typename T, typename... Args>
|
561 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateNoMessage(
|
562 |
+
Arena* arena, std::true_type, Args&&... args) {
|
563 |
+
// User is constructing with Create() despite the fact that T supports arena
|
564 |
+
// construction. In this case we have to delegate to CreateInternal(), and
|
565 |
+
// we can't use any CreateMaybeMessage() specialization that may be defined.
|
566 |
+
return CreateInternal<T>(arena, std::forward<Args>(args)...);
|
567 |
+
}
|
568 |
+
|
569 |
+
template <typename T, typename... Args>
|
570 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static T* CreateNoMessage(
|
571 |
+
Arena* arena, std::false_type, Args&&... args) {
|
572 |
+
// User is constructing with Create() and the type does not support arena
|
573 |
+
// construction. In this case we can delegate to CreateMaybeMessage() and
|
574 |
+
// use any specialization that may be available for that.
|
575 |
+
return CreateMaybeMessage<T>(arena, std::forward<Args>(args)...);
|
576 |
+
}
|
577 |
+
|
578 |
+
// Just allocate the required size for the given type assuming the
|
579 |
+
// type has a trivial constructor.
|
580 |
+
template <typename T>
|
581 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* CreateInternalRawArray(
|
582 |
+
size_t num_elements) {
|
583 |
+
GOOGLE_CHECK_LE(num_elements, std::numeric_limits<size_t>::max() / sizeof(T))
|
584 |
+
<< "Requested size is too large to fit into size_t.";
|
585 |
+
const size_t n = internal::AlignUpTo8(sizeof(T) * num_elements);
|
586 |
+
// Monitor allocation if needed.
|
587 |
+
AllocHook(RTTI_TYPE_ID(T), n);
|
588 |
+
return static_cast<T*>(impl_.AllocateAligned(n));
|
589 |
+
}
|
590 |
+
|
591 |
+
template <typename T, typename... Args>
|
592 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* DoCreate(
|
593 |
+
bool skip_explicit_ownership, Args&&... args) {
|
594 |
+
return new (AllocateInternal<T>(skip_explicit_ownership))
|
595 |
+
T(std::forward<Args>(args)...);
|
596 |
+
}
|
597 |
+
template <typename T, typename... Args>
|
598 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* DoCreateMessage(Args&&... args) {
|
599 |
+
return InternalHelper<T>::Construct(
|
600 |
+
AllocateInternal<T>(InternalHelper<T>::is_destructor_skippable::value),
|
601 |
+
this, std::forward<Args>(args)...);
|
602 |
+
}
|
603 |
+
|
604 |
+
// CreateInArenaStorage is used to implement map field. Without it,
|
605 |
+
// google::protobuf::Map need to call generated message's protected arena constructor,
|
606 |
+
// which needs to declare google::protobuf::Map as friend of generated message.
|
607 |
+
template <typename T>
|
608 |
+
static void CreateInArenaStorage(T* ptr, Arena* arena) {
|
609 |
+
CreateInArenaStorageInternal(ptr, arena,
|
610 |
+
typename is_arena_constructable<T>::type());
|
611 |
+
RegisterDestructorInternal(
|
612 |
+
ptr, arena,
|
613 |
+
typename InternalHelper<T>::is_destructor_skippable::type());
|
614 |
+
}
|
615 |
+
|
616 |
+
template <typename T>
|
617 |
+
static void CreateInArenaStorageInternal(T* ptr, Arena* arena,
|
618 |
+
std::true_type) {
|
619 |
+
InternalHelper<T>::Construct(ptr, arena);
|
620 |
+
}
|
621 |
+
template <typename T>
|
622 |
+
static void CreateInArenaStorageInternal(T* ptr, Arena* /* arena */,
|
623 |
+
std::false_type) {
|
624 |
+
new (ptr) T();
|
625 |
+
}
|
626 |
+
|
627 |
+
template <typename T>
|
628 |
+
static void RegisterDestructorInternal(T* /* ptr */, Arena* /* arena */,
|
629 |
+
std::true_type) {}
|
630 |
+
template <typename T>
|
631 |
+
static void RegisterDestructorInternal(T* ptr, Arena* arena,
|
632 |
+
std::false_type) {
|
633 |
+
arena->OwnDestructor(ptr);
|
634 |
+
}
|
635 |
+
|
636 |
+
  // These implement Own(), which registers an object for deletion (destructor
  // call and operator delete()). The second parameter has type 'true_type' if T
  // is a subtype of ::google::protobuf::Message and 'false_type' otherwise. Collapsing
  // all template instantiations to one for generic Message reduces code size,
  // using the virtual destructor instead.
  template <typename T>
  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void OwnInternal(T* object,
                                                           std::true_type) {
    // Message subtype: register the Message-typed deleter so every message
    // type shares one instantiation; deletion dispatches via the virtual
    // destructor.
    if (object != NULL) {
      impl_.AddCleanup(object, &internal::arena_delete_object<Message>);
    }
  }
  template <typename T>
  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void OwnInternal(T* object,
                                                           std::false_type) {
    // Non-message type: register a deleter instantiated for T itself.
    if (object != NULL) {
      impl_.AddCleanup(object, &internal::arena_delete_object<T>);
    }
  }
|
655 |
+
|
656 |
+
  // Implementation for GetArena(). Only message objects with
  // InternalArenaConstructable_ tags can be associated with an arena, and such
  // objects must implement a GetArenaNoVirtual() method.
  template <typename T>
  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArenaInternal(
      const T* value, std::true_type) {
    // Arena-capable type: ask the object itself for its owning arena.
    return InternalHelper<T>::GetArena(value);
  }

  template <typename T>
  GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE static Arena* GetArenaInternal(
      const T* /* value */, std::false_type) {
    // Type cannot be associated with an arena; report "no arena".
    return NULL;
  }
|
670 |
+
|
671 |
+
  // For friends of arena.
  // Allocates storage for n bytes (rounded up to a multiple of 8), notifying
  // the allocation hook first. NOTE(review): the NULL passed to AllocHook
  // appears to mean "no specific allocated type" — confirm against
  // AllocHook's definition, which is outside this view.
  void* AllocateAligned(size_t n) {
    AllocHook(NULL, n);
    return impl_.AllocateAligned(internal::AlignUpTo8(n));
  }
|
676 |
+
|
677 |
+
  // The underlying allocator implementation (block management, cleanup list).
  internal::ArenaImpl impl_;

  // Instrumentation hooks. NOTE(review): their installation/initialization is
  // outside this view — presumably set by the constructor from ArenaOptions;
  // confirm before relying on non-NULL values.
  void (*on_arena_allocation_)(const std::type_info* allocated_type,
                               uint64 alloc_size, void* cookie);
  void (*on_arena_reset_)(Arena* arena, void* cookie, uint64 space_used);
  void (*on_arena_destruction_)(Arena* arena, void* cookie, uint64 space_used);

  // The arena may save a cookie it receives from the external on_init hook
  // and then use it when calling the on_reset and on_destruction hooks.
  void* hooks_cookie_;

  // Friends that need the private helpers above (allocation, arena-aware
  // construction) without going through the public API.
  template <typename Type>
  friend class internal::GenericTypeHandler;
  friend struct internal::ArenaStringPtr;  // For AllocateAligned.
  friend class internal::LazyField;        // For CreateMaybeMessage.
  friend class MessageLite;
  template <typename Key, typename T>
  friend class Map;
};
|
696 |
+
|
697 |
+
// Defined above for supporting environments without RTTI.
|
698 |
+
#undef RTTI_TYPE_ID
|
699 |
+
|
700 |
+
} // namespace protobuf
|
701 |
+
|
702 |
+
} // namespace google
|
703 |
+
#endif // GOOGLE_PROTOBUF_ARENA_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arena_impl.h
ADDED
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// This file defines an Arena allocator for better allocation performance.
|
32 |
+
|
33 |
+
#ifndef GOOGLE_PROTOBUF_ARENA_IMPL_H__
|
34 |
+
#define GOOGLE_PROTOBUF_ARENA_IMPL_H__
|
35 |
+
|
36 |
+
#include <atomic>
|
37 |
+
#include <limits>
|
38 |
+
|
39 |
+
#include <google/protobuf/stubs/common.h>
|
40 |
+
#include <google/protobuf/stubs/logging.h>
|
41 |
+
|
42 |
+
#include <google/protobuf/stubs/port.h>
|
43 |
+
|
44 |
+
#ifdef ADDRESS_SANITIZER
|
45 |
+
#include <sanitizer/asan_interface.h>
|
46 |
+
#endif // ADDRESS_SANITIZER
|
47 |
+
|
48 |
+
namespace google {
|
49 |
+
|
50 |
+
namespace protobuf {
|
51 |
+
namespace internal {
|
52 |
+
|
53 |
+
// Rounds n up to the next multiple of 8. The arena hands out only 8-byte
// aligned sizes so that returned pointers keep their alignment.
// (Standard round-up-to-power-of-two bit trick; cf. Hacker's Delight, Ch. 3.)
inline size_t AlignUpTo8(size_t n) {
  constexpr size_t kAlignment = 8;
  constexpr size_t kMask = kAlignment - 1;  // low three bits
  return (n + kMask) & ~kMask;
}
|
57 |
+
|
58 |
+
// This class provides the core Arena memory allocation library. Different
// implementations only need to implement the public interface below.
// Arena is not a template type as that would only be useful if all protos
// in turn would be templates, which will/cannot happen. However separating
// the memory allocation part from the cruft of the API users expect we can
// use #ifdef the select the best implementation based on hardware / OS.
class LIBPROTOBUF_EXPORT ArenaImpl {
 public:
  // Allocation parameters, copied field-by-field from whatever options object
  // the owning Arena supplies (templated so this header need not know that
  // type).
  struct Options {
    size_t start_block_size;    // Size of the first allocated block.
    size_t max_block_size;      // Upper bound on block growth.
    char* initial_block;        // Optional caller-supplied first block.
    size_t initial_block_size;  // Size of initial_block, in bytes.
    void* (*block_alloc)(size_t);          // Allocator for new blocks.
    void (*block_dealloc)(void*, size_t);  // Deallocator for blocks.

    template <typename O>
    explicit Options(const O& options)
        : start_block_size(options.start_block_size),
          max_block_size(options.max_block_size),
          initial_block(options.initial_block),
          initial_block_size(options.initial_block_size),
          block_alloc(options.block_alloc),
          block_dealloc(options.block_dealloc) {}
  };

  // If the caller passed an initial block it must be large enough to hold a
  // Block header (checked here); otherwise no initial block is used.
  template <typename O>
  explicit ArenaImpl(const O& options) : options_(options) {
    if (options_.initial_block != NULL && options_.initial_block_size > 0) {
      GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
          << ": Initial block size too small for header.";
      initial_block_ = reinterpret_cast<Block*>(options_.initial_block);
    } else {
      initial_block_ = NULL;
    }

    Init();
  }

  // Destructor deletes all owned heap allocated objects, and destructs objects
  // that have non-trivial destructors, except for proto2 message objects whose
  // destructors can be skipped. Also, frees all blocks except the initial block
  // if it was passed in.
  ~ArenaImpl();

  // Returns the arena to its just-constructed state; the uint64 return is
  // defined in the .cc file (not visible here).
  uint64 Reset();

  uint64 SpaceAllocated() const;
  uint64 SpaceUsed() const;

  void* AllocateAligned(size_t n);

  void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*));

  // Add object pointer and cleanup function pointer to the list.
  void AddCleanup(void* elem, void (*cleanup)(void*));

 private:
  // Out-of-line slow paths for the corresponding fast-path entry points.
  void* AllocateAlignedFallback(size_t n);
  void* AllocateAlignedAndAddCleanupFallback(size_t n, void (*cleanup)(void*));
  void AddCleanupFallback(void* elem, void (*cleanup)(void*));

  // Node contains the ptr of the object to be cleaned up and the associated
  // cleanup function ptr.
  struct CleanupNode {
    void* elem;              // Pointer to the object to be cleaned up.
    void (*cleanup)(void*);  // Function pointer to the destructor or deleter.
  };

  // Cleanup uses a chunked linked list, to reduce pointer chasing.
  struct CleanupChunk {
    // Bytes needed for a chunk holding i nodes; nodes[1] already accounts for
    // one node, hence (i - 1).
    static size_t SizeOf(size_t i) {
      return sizeof(CleanupChunk) + (sizeof(CleanupNode) * (i - 1));
    }
    size_t size;           // Total elements in the list.
    CleanupChunk* next;    // Next node in the list.
    CleanupNode nodes[1];  // True length is |size|.
  };

  class Block;

  // A thread-unsafe Arena that can only be used within its owning thread.
  class LIBPROTOBUF_EXPORT SerialArena {
   public:
    // The allocate/free methods here are a little strange, since SerialArena is
    // allocated inside a Block which it also manages. This is to avoid doing
    // an extra allocation for the SerialArena itself.

    // Creates a new SerialArena inside Block* and returns it.
    static SerialArena* New(Block* b, void* owner, ArenaImpl* arena);

    // Destroys this SerialArena, freeing all blocks with the given dealloc
    // function, except any block equal to |initial_block|.
    static uint64 Free(SerialArena* serial, Block* initial_block,
                       void (*block_dealloc)(void*, size_t));

    void CleanupList();
    uint64 SpaceUsed() const;

    // Fast-path bump allocation from the current block; takes the fallback
    // when the remaining space in head_ is insufficient.
    void* AllocateAligned(size_t n) {
      GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
      GOOGLE_DCHECK_GE(limit_, ptr_);
      if (GOOGLE_PREDICT_FALSE(static_cast<size_t>(limit_ - ptr_) < n)) {
        return AllocateAlignedFallback(n);
      }
      void* ret = ptr_;
      ptr_ += n;
#ifdef ADDRESS_SANITIZER
      ASAN_UNPOISON_MEMORY_REGION(ret, n);
#endif  // ADDRESS_SANITIZER
      return ret;
    }

    // Appends to the current cleanup chunk; grows the chunk list through the
    // fallback when the chunk is full.
    void AddCleanup(void* elem, void (*cleanup)(void*)) {
      if (GOOGLE_PREDICT_FALSE(cleanup_ptr_ == cleanup_limit_)) {
        AddCleanupFallback(elem, cleanup);
        return;
      }
      cleanup_ptr_->elem = elem;
      cleanup_ptr_->cleanup = cleanup;
      cleanup_ptr_++;
    }

    void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)) {
      void* ret = AllocateAligned(n);
      AddCleanup(ret, cleanup);
      return ret;
    }

    void* owner() const { return owner_; }
    SerialArena* next() const { return next_; }
    void set_next(SerialArena* next) { next_ = next; }

   private:
    void* AllocateAlignedFallback(size_t n);
    void AddCleanupFallback(void* elem, void (*cleanup)(void*));
    void CleanupListFallback();

    ArenaImpl* arena_;       // Containing arena.
    void* owner_;            // &ThreadCache of this thread;
    Block* head_;            // Head of linked list of blocks.
    CleanupChunk* cleanup_;  // Head of cleanup list.
    SerialArena* next_;      // Next SerialArena in this linked list.

    // Next pointer to allocate from. Always 8-byte aligned. Points inside
    // head_ (and head_->pos will always be non-canonical). We keep these
    // here to reduce indirection.
    char* ptr_;
    char* limit_;

    // Next CleanupList members to append to. These point inside cleanup_.
    CleanupNode* cleanup_ptr_;
    CleanupNode* cleanup_limit_;
  };

  // Blocks are variable length malloc-ed objects. The following structure
  // describes the common header for all blocks.
  class LIBPROTOBUF_EXPORT Block {
   public:
    Block(size_t size, Block* next);

    // Returns the address n bytes past the start of this block (header
    // included in the offset).
    char* Pointer(size_t n) {
      GOOGLE_DCHECK(n <= size_);
      return reinterpret_cast<char*>(this) + n;
    }

    Block* next() const { return next_; }
    size_t pos() const { return pos_; }
    size_t size() const { return size_; }
    void set_pos(size_t pos) { pos_ = pos; }

   private:
    Block* next_;  // Next block for this thread.
    size_t pos_;
    size_t size_;
    // data follows
  };

  // Per-thread cache of the SerialArena most recently used with an arena,
  // validated against lifecycle_id_ (see CacheSerialArena below).
  struct ThreadCache {
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
    // If we are using the ThreadLocalStorage class to store the ThreadCache,
    // then the ThreadCache's default constructor has to be responsible for
    // initializing it.
    ThreadCache() : last_lifecycle_id_seen(-1), last_serial_arena(NULL) {}
#endif

    // The ThreadCache is considered valid as long as this matches the
    // lifecycle_id of the arena being used.
    int64 last_lifecycle_id_seen;
    SerialArena* last_serial_arena;
  };
  static std::atomic<int64> lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
  // Android ndk does not support GOOGLE_THREAD_LOCAL keyword so we use a custom thread
  // local storage class we implemented.
  // iOS also does not support the GOOGLE_THREAD_LOCAL keyword.
  static ThreadCache& thread_cache();
#elif defined(PROTOBUF_USE_DLLS)
  // Thread local variables cannot be exposed through DLL interface but we can
  // wrap them in static functions.
  static ThreadCache& thread_cache();
#else
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_;
  static ThreadCache& thread_cache() { return thread_cache_; }
#endif

  void Init();

  // Free all blocks and return the total space used which is the sums of sizes
  // of the all the allocated blocks.
  uint64 FreeBlocks();
  // Delete or Destruct all objects owned by the arena.
  void CleanupList();

  // Records |serial| in this thread's cache and publishes it as the shared
  // hint (release store pairs with the reader's acquire, presumably in
  // GetSerialArenaFast — its body is not visible here).
  inline void CacheSerialArena(SerialArena* serial) {
    thread_cache().last_serial_arena = serial;
    thread_cache().last_lifecycle_id_seen = lifecycle_id_;
    // TODO(haberman): evaluate whether we would gain efficiency by getting rid
    // of hint_. It's the only write we do to ArenaImpl in the allocation path,
    // which will dirty the cache line.

    hint_.store(serial, std::memory_order_release);
  }

  std::atomic<SerialArena*>
      threads_;                     // Pointer to a linked list of SerialArena.
  std::atomic<SerialArena*> hint_;  // Fast thread-local block access
  std::atomic<size_t> space_allocated_;  // Total size of all allocated blocks.

  Block *initial_block_;  // If non-NULL, points to the block that came from
                          // user data.

  Block* NewBlock(Block* last_block, size_t min_bytes);

  SerialArena* GetSerialArena();
  bool GetSerialArenaFast(SerialArena** arena);
  SerialArena* GetSerialArenaFallback(void* me);
  int64 lifecycle_id_;  // Unique for each arena. Changes on Reset().

  Options options_;

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArenaImpl);
  // All protos have pointers back to the arena hence Arena must have
  // pointer stability.
  ArenaImpl(ArenaImpl&&) = delete;
  ArenaImpl& operator=(ArenaImpl&&) = delete;

 public:
  // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
  // to protect the invariant that pos is always at a multiple of 8.
  static const size_t kBlockHeaderSize = (sizeof(Block) + 7) & -8;
  static const size_t kSerialArenaSize = (sizeof(SerialArena) + 7) & -8;
  static_assert(kBlockHeaderSize % 8 == 0,
                "kBlockHeaderSize must be a multiple of 8.");
  static_assert(kSerialArenaSize % 8 == 0,
                "kSerialArenaSize must be a multiple of 8.");
};
|
316 |
+
|
317 |
+
} // namespace internal
|
318 |
+
} // namespace protobuf
|
319 |
+
|
320 |
+
} // namespace google
|
321 |
+
#endif // GOOGLE_PROTOBUF_ARENA_IMPL_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/arenastring.h
ADDED
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_ARENASTRING_H__
|
32 |
+
#define GOOGLE_PROTOBUF_ARENASTRING_H__
|
33 |
+
|
34 |
+
#include <string>
|
35 |
+
|
36 |
+
#include <google/protobuf/arena.h>
|
37 |
+
#include <google/protobuf/stubs/common.h>
|
38 |
+
#include <google/protobuf/stubs/fastmem.h>
|
39 |
+
#include <google/protobuf/stubs/logging.h>
|
40 |
+
#include <google/protobuf/stubs/port.h>
|
41 |
+
|
42 |
+
// This is the implementation of arena string fields written for the open-source
|
43 |
+
// release. The ArenaStringPtr struct below is an internal implementation class
|
44 |
+
// and *should not be used* by user code. It is used to collect string
|
45 |
+
// operations together into one place and abstract away the underlying
|
46 |
+
// string-field pointer representation, so that (for example) an alternate
|
47 |
+
// implementation that knew more about ::std::string's internals could integrate more
|
48 |
+
// closely with the arena allocator.
|
49 |
+
|
50 |
+
namespace google {
|
51 |
+
namespace protobuf {
|
52 |
+
namespace internal {
|
53 |
+
|
54 |
+
// Stores a T* as an integer value. The name anticipates tag bits packed into
// the pointer's low bits, but this implementation stores only the raw pointer
// value.
//
// Fix: IsNull() is a pure accessor and is now const-qualified so it can be
// called through const objects/references (backward-compatible change).
template <typename T>
class TaggedPtr {
 public:
  // Stores |p|; NULL is represented as 0.
  void Set(T* p) { ptr_ = reinterpret_cast<uintptr_t>(p); }
  // Returns the stored pointer (NULL if none was stored).
  T* Get() const { return reinterpret_cast<T*>(ptr_); }

  // True if the stored pointer is NULL.
  bool IsNull() const { return ptr_ == 0; }

 private:
  uintptr_t ptr_;  // Note: not zero-initialized; callers must Set() first.
};
|
65 |
+
|
66 |
+
struct LIBPROTOBUF_EXPORT ArenaStringPtr {
|
67 |
+
inline void Set(const ::std::string* default_value,
|
68 |
+
const ::std::string& value, ::google::protobuf::Arena* arena) {
|
69 |
+
if (ptr_ == default_value) {
|
70 |
+
CreateInstance(arena, &value);
|
71 |
+
} else {
|
72 |
+
*ptr_ = value;
|
73 |
+
}
|
74 |
+
}
|
75 |
+
|
76 |
+
inline void SetLite(const ::std::string* default_value,
|
77 |
+
const ::std::string& value,
|
78 |
+
::google::protobuf::Arena* arena) {
|
79 |
+
Set(default_value, value, arena);
|
80 |
+
}
|
81 |
+
|
82 |
+
// Basic accessors.
|
83 |
+
inline const ::std::string& Get() const { return *ptr_; }
|
84 |
+
|
85 |
+
inline ::std::string* Mutable(const ::std::string* default_value,
|
86 |
+
::google::protobuf::Arena* arena) {
|
87 |
+
if (ptr_ == default_value) {
|
88 |
+
CreateInstance(arena, default_value);
|
89 |
+
}
|
90 |
+
return ptr_;
|
91 |
+
}
|
92 |
+
|
93 |
+
// Release returns a ::std::string* instance that is heap-allocated and is not
|
94 |
+
// Own()'d by any arena. If the field was not set, it returns NULL. The caller
|
95 |
+
// retains ownership. Clears this field back to NULL state. Used to implement
|
96 |
+
// release_<field>() methods on generated classes.
|
97 |
+
inline ::std::string* Release(const ::std::string* default_value,
|
98 |
+
::google::protobuf::Arena* arena) {
|
99 |
+
if (ptr_ == default_value) {
|
100 |
+
return NULL;
|
101 |
+
}
|
102 |
+
return ReleaseNonDefault(default_value, arena);
|
103 |
+
}
|
104 |
+
|
105 |
+
// Similar to Release, but ptr_ cannot be the default_value.
|
106 |
+
inline ::std::string* ReleaseNonDefault(
|
107 |
+
const ::std::string* default_value, ::google::protobuf::Arena* arena) {
|
108 |
+
GOOGLE_DCHECK(!IsDefault(default_value));
|
109 |
+
::std::string* released = NULL;
|
110 |
+
if (arena != NULL) {
|
111 |
+
// ptr_ is owned by the arena.
|
112 |
+
released = new ::std::string;
|
113 |
+
released->swap(*ptr_);
|
114 |
+
} else {
|
115 |
+
released = ptr_;
|
116 |
+
}
|
117 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
118 |
+
return released;
|
119 |
+
}
|
120 |
+
|
121 |
+
// UnsafeArenaRelease returns a ::std::string*, but it may be arena-owned (i.e.
|
122 |
+
// have its destructor already registered) if arena != NULL. If the field was
|
123 |
+
// not set, this returns NULL. This method clears this field back to NULL
|
124 |
+
// state. Used to implement unsafe_arena_release_<field>() methods on
|
125 |
+
// generated classes.
|
126 |
+
inline ::std::string* UnsafeArenaRelease(const ::std::string* default_value,
|
127 |
+
::google::protobuf::Arena* /* arena */) {
|
128 |
+
if (ptr_ == default_value) {
|
129 |
+
return NULL;
|
130 |
+
}
|
131 |
+
::std::string* released = ptr_;
|
132 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
133 |
+
return released;
|
134 |
+
}
|
135 |
+
|
136 |
+
// Takes a string that is heap-allocated, and takes ownership. The string's
|
137 |
+
// destructor is registered with the arena. Used to implement
|
138 |
+
// set_allocated_<field> in generated classes.
|
139 |
+
inline void SetAllocated(const ::std::string* default_value,
|
140 |
+
::std::string* value, ::google::protobuf::Arena* arena) {
|
141 |
+
if (arena == NULL && ptr_ != default_value) {
|
142 |
+
Destroy(default_value, arena);
|
143 |
+
}
|
144 |
+
if (value != NULL) {
|
145 |
+
ptr_ = value;
|
146 |
+
if (arena != NULL) {
|
147 |
+
arena->Own(value);
|
148 |
+
}
|
149 |
+
} else {
|
150 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
151 |
+
}
|
152 |
+
}
|
153 |
+
|
154 |
+
// Takes a string that has lifetime equal to the arena's lifetime. The arena
|
155 |
+
// must be non-null. It is safe only to pass this method a value returned by
|
156 |
+
// UnsafeArenaRelease() on another field of a message in the same arena. Used
|
157 |
+
// to implement unsafe_arena_set_allocated_<field> in generated classes.
|
158 |
+
inline void UnsafeArenaSetAllocated(const ::std::string* default_value,
|
159 |
+
::std::string* value,
|
160 |
+
::google::protobuf::Arena* /* arena */) {
|
161 |
+
if (value != NULL) {
|
162 |
+
ptr_ = value;
|
163 |
+
} else {
|
164 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
165 |
+
}
|
166 |
+
}
|
167 |
+
|
168 |
+
// Swaps internal pointers. Arena-safety semantics: this is guarded by the
|
169 |
+
// logic in Swap()/UnsafeArenaSwap() at the message level, so this method is
|
170 |
+
// 'unsafe' if called directly.
|
171 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(ArenaStringPtr* other) {
|
172 |
+
std::swap(ptr_, other->ptr_);
|
173 |
+
}
|
174 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(
|
175 |
+
ArenaStringPtr* other, const ::std::string* default_value, Arena* arena) {
|
176 |
+
#ifndef NDEBUG
|
177 |
+
// For debug builds, we swap the contents of the string, rather than the
|
178 |
+
// string instances themselves. This invalidates previously taken const
|
179 |
+
// references that are (per our documentation) invalidated by calling Swap()
|
180 |
+
// on the message.
|
181 |
+
//
|
182 |
+
// If both strings are the default_value, swapping is uninteresting.
|
183 |
+
// Otherwise, we use ArenaStringPtr::Mutable() to access the string, to
|
184 |
+
// ensure that we do not try to mutate default_value itself.
|
185 |
+
if (IsDefault(default_value) && other->IsDefault(default_value)) {
|
186 |
+
return;
|
187 |
+
}
|
188 |
+
|
189 |
+
::std::string* this_ptr = Mutable(default_value, arena);
|
190 |
+
::std::string* other_ptr = other->Mutable(default_value, arena);
|
191 |
+
|
192 |
+
this_ptr->swap(*other_ptr);
|
193 |
+
#else
|
194 |
+
std::swap(ptr_, other->ptr_);
|
195 |
+
#endif
|
196 |
+
}
|
197 |
+
|
198 |
+
// Frees storage (if not on an arena).
|
199 |
+
inline void Destroy(const ::std::string* default_value,
|
200 |
+
::google::protobuf::Arena* arena) {
|
201 |
+
if (arena == NULL && ptr_ != default_value) {
|
202 |
+
delete ptr_;
|
203 |
+
}
|
204 |
+
}
|
205 |
+
|
206 |
+
// Clears content, but keeps allocated string if arena != NULL, to avoid the
|
207 |
+
// overhead of heap operations. After this returns, the content (as seen by
|
208 |
+
// the user) will always be the empty string. Assumes that |default_value|
|
209 |
+
// is an empty string.
|
210 |
+
inline void ClearToEmpty(const ::std::string* default_value,
|
211 |
+
::google::protobuf::Arena* /* arena */) {
|
212 |
+
if (ptr_ == default_value) {
|
213 |
+
// Already set to default (which is empty) -- do nothing.
|
214 |
+
} else {
|
215 |
+
ptr_->clear();
|
216 |
+
}
|
217 |
+
}
|
218 |
+
|
219 |
+
// Clears content, assuming that the current value is not the empty string
|
220 |
+
// default.
|
221 |
+
inline void ClearNonDefaultToEmpty() {
|
222 |
+
ptr_->clear();
|
223 |
+
}
|
224 |
+
inline void ClearNonDefaultToEmptyNoArena() {
|
225 |
+
ptr_->clear();
|
226 |
+
}
|
227 |
+
|
228 |
+
// Clears content, but keeps allocated string if arena != NULL, to avoid the
|
229 |
+
// overhead of heap operations. After this returns, the content (as seen by
|
230 |
+
// the user) will always be equal to |default_value|.
|
231 |
+
inline void ClearToDefault(const ::std::string* default_value,
|
232 |
+
::google::protobuf::Arena* /* arena */) {
|
233 |
+
if (ptr_ == default_value) {
|
234 |
+
// Already set to default -- do nothing.
|
235 |
+
} else {
|
236 |
+
// Have another allocated string -- rather than throwing this away and
|
237 |
+
// resetting ptr_ to the canonical default string instance, we just reuse
|
238 |
+
// this instance.
|
239 |
+
*ptr_ = *default_value;
|
240 |
+
}
|
241 |
+
}
|
242 |
+
|
243 |
+
// Called from generated code / reflection runtime only. Resets value to point
|
244 |
+
// to a default string pointer, with the semantics that this ArenaStringPtr
|
245 |
+
// does not own the pointed-to memory. Disregards initial value of ptr_ (so
|
246 |
+
// this is the *ONLY* safe method to call after construction or when
|
247 |
+
// reinitializing after becoming the active field in a oneof union).
|
248 |
+
inline void UnsafeSetDefault(const ::std::string* default_value) {
|
249 |
+
// Casting away 'const' is safe here: accessors ensure that ptr_ is only
|
250 |
+
// returned as a const if it is equal to default_value.
|
251 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
252 |
+
}
|
253 |
+
|
254 |
+
// The 'NoArena' variants of methods below assume arena == NULL and are
|
255 |
+
// optimized to provide very little overhead relative to a raw string pointer
|
256 |
+
// (while still being in-memory compatible with other code that assumes
|
257 |
+
// ArenaStringPtr). Note the invariant that a class instance that has only
|
258 |
+
// ever been mutated by NoArena methods must *only* be in the String state
|
259 |
+
// (i.e., tag bits are not used), *NEVER* ArenaString. This allows all
|
260 |
+
// tagged-pointer manipulations to be avoided.
|
261 |
+
inline void SetNoArena(const ::std::string* default_value,
|
262 |
+
const ::std::string& value) {
|
263 |
+
if (ptr_ == default_value) {
|
264 |
+
CreateInstanceNoArena(&value);
|
265 |
+
} else {
|
266 |
+
*ptr_ = value;
|
267 |
+
}
|
268 |
+
}
|
269 |
+
|
270 |
+
#if LANG_CXX11
// Rvalue overload: moves |value| into this field (non-arena case only).
void SetNoArena(const ::std::string* default_value, ::std::string&& value) {
  if (!IsDefault(default_value)) {
    // Move-assign into the string we already own.
    *ptr_ = std::move(value);
  } else {
    // Replace the shared default with a freshly move-constructed string.
    ptr_ = new ::std::string(std::move(value));
  }
}
#endif
|
279 |
+
|
280 |
+
void AssignWithDefault(const ::std::string* default_value, ArenaStringPtr value);
|
281 |
+
|
282 |
+
// Read accessor (non-arena case).  May return the shared default instance.
inline const ::std::string& GetNoArena() const {
  return *ptr_;
}
|
283 |
+
|
284 |
+
inline ::std::string* MutableNoArena(const ::std::string* default_value) {
|
285 |
+
if (ptr_ == default_value) {
|
286 |
+
CreateInstanceNoArena(default_value);
|
287 |
+
}
|
288 |
+
return ptr_;
|
289 |
+
}
|
290 |
+
|
291 |
+
inline ::std::string* ReleaseNoArena(const ::std::string* default_value) {
|
292 |
+
if (ptr_ == default_value) {
|
293 |
+
return NULL;
|
294 |
+
} else {
|
295 |
+
return ReleaseNonDefaultNoArena(default_value);
|
296 |
+
}
|
297 |
+
}
|
298 |
+
|
299 |
+
inline ::std::string* ReleaseNonDefaultNoArena(
|
300 |
+
const ::std::string* default_value) {
|
301 |
+
GOOGLE_DCHECK(!IsDefault(default_value));
|
302 |
+
::std::string* released = ptr_;
|
303 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
304 |
+
return released;
|
305 |
+
}
|
306 |
+
|
307 |
+
|
308 |
+
inline void SetAllocatedNoArena(const ::std::string* default_value,
|
309 |
+
::std::string* value) {
|
310 |
+
if (ptr_ != default_value) {
|
311 |
+
delete ptr_;
|
312 |
+
}
|
313 |
+
if (value != NULL) {
|
314 |
+
ptr_ = value;
|
315 |
+
} else {
|
316 |
+
ptr_ = const_cast< ::std::string* >(default_value);
|
317 |
+
}
|
318 |
+
}
|
319 |
+
|
320 |
+
inline void DestroyNoArena(const ::std::string* default_value) {
|
321 |
+
if (ptr_ != default_value) {
|
322 |
+
delete ptr_;
|
323 |
+
}
|
324 |
+
}
|
325 |
+
|
326 |
+
// Clears the field to the empty string (non-arena case).  While still on
// the shared default — which is the empty string — there is nothing to do;
// otherwise the owned instance is emptied in place, keeping its capacity.
inline void ClearToEmptyNoArena(const ::std::string* default_value) {
  if (ptr_ != default_value) {
    ptr_->clear();
  }
}
|
333 |
+
|
334 |
+
// Resets the field to the default contents (non-arena case).  Rather than
// freeing the owned allocation and pointing back at the default instance,
// the allocation is kept and overwritten with the default's contents.
inline void ClearToDefaultNoArena(const ::std::string* default_value) {
  if (ptr_ != default_value) {
    *ptr_ = *default_value;
  }
}
|
342 |
+
|
343 |
+
// Internal accessor used only at parse time to provide direct access to the
|
344 |
+
// raw pointer from the shared parse routine (in the non-arenas case). The
|
345 |
+
// parse routine does the string allocation in order to save code size in the
|
346 |
+
// generated parsing code.
|
347 |
+
inline ::std::string** UnsafeRawStringPointer() {
|
348 |
+
return &ptr_;
|
349 |
+
}
|
350 |
+
|
351 |
+
inline bool IsDefault(const ::std::string* default_value) const {
|
352 |
+
return ptr_ == default_value;
|
353 |
+
}
|
354 |
+
|
355 |
+
// Internal accessors!!!!
|
356 |
+
// Internal accessor!!!  Adopts the pointer held by |value| verbatim.
void UnsafeSetTaggedPointer(TaggedPtr< ::std::string> value) {
  ptr_ = value.Get();
}
|
359 |
+
// Generated code only! An optimization, in certain cases the generated
|
360 |
+
// code is certain we can obtain a string with no default checks and
|
361 |
+
// tag tests.
|
362 |
+
// Generated code only!  Fast path that returns ptr_ with no default checks
// and no tag tests.
::std::string* UnsafeMutablePointer() { return ptr_; }
|
363 |
+
|
364 |
+
private:
|
365 |
+
::std::string* ptr_;
|
366 |
+
|
367 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
// Slow path: allocates the backing string, on |arena| when one is given.
void CreateInstance(::google::protobuf::Arena* arena,
                    const ::std::string* initial_value) {
  GOOGLE_DCHECK(initial_value != NULL);
  // Arena::Create falls back to "new ::std::string" when arena is NULL.
  ptr_ = Arena::Create< ::std::string >(arena, *initial_value);
}
|
374 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
// Slow path (non-arena): heap-allocates a copy of |initial_value|; this
// ArenaStringPtr becomes the owner.
void CreateInstanceNoArena(const ::std::string* initial_value) {
  GOOGLE_DCHECK(initial_value != NULL);
  ptr_ = new ::std::string(*initial_value);
}
|
379 |
+
};
|
380 |
+
|
381 |
+
} // namespace internal
|
382 |
+
} // namespace protobuf
|
383 |
+
|
384 |
+
|
385 |
+
|
386 |
+
namespace protobuf {
|
387 |
+
namespace internal {
|
388 |
+
|
389 |
+
inline void ArenaStringPtr::AssignWithDefault(const ::std::string* default_value,
|
390 |
+
ArenaStringPtr value) {
|
391 |
+
const ::std::string* me = *UnsafeRawStringPointer();
|
392 |
+
const ::std::string* other = *value.UnsafeRawStringPointer();
|
393 |
+
// If the pointers are the same then do nothing.
|
394 |
+
if (me != other) {
|
395 |
+
SetNoArena(default_value, value.GetNoArena());
|
396 |
+
}
|
397 |
+
}
|
398 |
+
|
399 |
+
} // namespace internal
|
400 |
+
} // namespace protobuf
|
401 |
+
|
402 |
+
} // namespace google
|
403 |
+
#endif // GOOGLE_PROTOBUF_ARENASTRING_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/extension_set.h
ADDED
@@ -0,0 +1,1462 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This header is logically internal, but is made public because it is used
|
36 |
+
// from protocol-compiler-generated code, which may reside in other components.
|
37 |
+
|
38 |
+
#ifndef GOOGLE_PROTOBUF_EXTENSION_SET_H__
|
39 |
+
#define GOOGLE_PROTOBUF_EXTENSION_SET_H__
|
40 |
+
|
41 |
+
#include <algorithm>
|
42 |
+
#include <cassert>
|
43 |
+
#include <map>
|
44 |
+
#include <string>
|
45 |
+
#include <utility>
|
46 |
+
#include <vector>
|
47 |
+
|
48 |
+
#include <google/protobuf/stubs/common.h>
|
49 |
+
#include <google/protobuf/stubs/logging.h>
|
50 |
+
#include <google/protobuf/stubs/once.h>
|
51 |
+
#include <google/protobuf/repeated_field.h>
|
52 |
+
|
53 |
+
namespace google {
|
54 |
+
|
55 |
+
namespace protobuf {
|
56 |
+
class Arena;
|
57 |
+
class Descriptor; // descriptor.h
|
58 |
+
class FieldDescriptor; // descriptor.h
|
59 |
+
class DescriptorPool; // descriptor.h
|
60 |
+
class MessageLite; // message_lite.h
|
61 |
+
class Message; // message.h
|
62 |
+
class MessageFactory; // message.h
|
63 |
+
class UnknownFieldSet; // unknown_field_set.h
|
64 |
+
namespace io {
|
65 |
+
class CodedInputStream; // coded_stream.h
|
66 |
+
class CodedOutputStream; // coded_stream.h
|
67 |
+
}
|
68 |
+
namespace internal {
|
69 |
+
class FieldSkipper; // wire_format_lite.h
|
70 |
+
}
|
71 |
+
}
|
72 |
+
|
73 |
+
namespace protobuf {
|
74 |
+
namespace internal {
|
75 |
+
|
76 |
+
// Used to store values of type WireFormatLite::FieldType without having to
|
77 |
+
// #include wire_format_lite.h. Also, ensures that we use only one byte to
|
78 |
+
// store these values, which is important to keep the layout of
|
79 |
+
// ExtensionSet::Extension small.
|
80 |
+
typedef uint8 FieldType;
|
81 |
+
|
82 |
+
// A function which, given an integer value, returns true if the number
|
83 |
+
// matches one of the defined values for the corresponding enum type. This
|
84 |
+
// is used with RegisterEnumExtension, below.
|
85 |
+
typedef bool EnumValidityFunc(int number);
|
86 |
+
|
87 |
+
// Version of the above which takes an argument. This is needed to deal with
|
88 |
+
// extensions that are not compiled in.
|
89 |
+
typedef bool EnumValidityFuncWithArg(const void* arg, int number);
|
90 |
+
|
91 |
+
// Information about a registered extension.
|
92 |
+
struct ExtensionInfo {
|
93 |
+
inline ExtensionInfo() {}
|
94 |
+
inline ExtensionInfo(FieldType type_param, bool isrepeated, bool ispacked)
|
95 |
+
: type(type_param), is_repeated(isrepeated), is_packed(ispacked),
|
96 |
+
descriptor(NULL) {}
|
97 |
+
|
98 |
+
FieldType type;
|
99 |
+
bool is_repeated;
|
100 |
+
bool is_packed;
|
101 |
+
|
102 |
+
struct EnumValidityCheck {
|
103 |
+
EnumValidityFuncWithArg* func;
|
104 |
+
const void* arg;
|
105 |
+
};
|
106 |
+
|
107 |
+
union {
|
108 |
+
EnumValidityCheck enum_validity_check;
|
109 |
+
const MessageLite* message_prototype;
|
110 |
+
};
|
111 |
+
|
112 |
+
// The descriptor for this extension, if one exists and is known. May be
|
113 |
+
// NULL. Must not be NULL if the descriptor for the extension does not
|
114 |
+
// live in the same pool as the descriptor for the containing type.
|
115 |
+
const FieldDescriptor* descriptor;
|
116 |
+
};
|
117 |
+
|
118 |
+
// Abstract interface for an object which looks up extension definitions. Used
|
119 |
+
// when parsing.
|
120 |
+
class LIBPROTOBUF_EXPORT ExtensionFinder {
|
121 |
+
public:
|
122 |
+
virtual ~ExtensionFinder();
|
123 |
+
|
124 |
+
// Find the extension with the given containing type and number.
|
125 |
+
virtual bool Find(int number, ExtensionInfo* output) = 0;
|
126 |
+
};
|
127 |
+
|
128 |
+
// Implementation of ExtensionFinder which finds extensions defined in .proto
|
129 |
+
// files which have been compiled into the binary.
|
130 |
+
// ExtensionFinder backed by the compiled-in registry: resolves extensions
// whose .proto definitions were linked into this binary (registered via
// ExtensionSet::RegisterExtension and friends).
class LIBPROTOBUF_EXPORT GeneratedExtensionFinder : public ExtensionFinder {
 public:
  // NOTE(review): single-argument constructor is implicit in this vintage
  // of protobuf; adding 'explicit' could break existing callers — verify
  // before changing.
  GeneratedExtensionFinder(const MessageLite* containing_type)
      : containing_type_(containing_type) {}
  virtual ~GeneratedExtensionFinder() {}

  // Returns true and fills in *output if found, otherwise returns false.
  virtual bool Find(int number, ExtensionInfo* output);

 private:
  const MessageLite* containing_type_;  // not owned
};
|
142 |
+
|
143 |
+
// A FieldSkipper used for parsing MessageSet.
|
144 |
+
class MessageSetFieldSkipper;
|
145 |
+
|
146 |
+
// Note: extension_set_heavy.cc defines DescriptorPoolExtensionFinder for
|
147 |
+
// finding extensions from a DescriptorPool.
|
148 |
+
|
149 |
+
// This is an internal helper class intended for use within the protocol buffer
|
150 |
+
// library and generated classes. Clients should not use it directly. Instead,
|
151 |
+
// use the generated accessors such as GetExtension() of the class being
|
152 |
+
// extended.
|
153 |
+
//
|
154 |
+
// This class manages extensions for a protocol message object. The
|
155 |
+
// message's HasExtension(), GetExtension(), MutableExtension(), and
|
156 |
+
// ClearExtension() methods are just thin wrappers around the embedded
|
157 |
+
// ExtensionSet. When parsing, if a tag number is encountered which is
|
158 |
+
// inside one of the message type's extension ranges, the tag is passed
|
159 |
+
// off to the ExtensionSet for parsing. Etc.
|
160 |
+
class LIBPROTOBUF_EXPORT ExtensionSet {
|
161 |
+
public:
|
162 |
+
ExtensionSet();
|
163 |
+
explicit ExtensionSet(::google::protobuf::Arena* arena);
|
164 |
+
~ExtensionSet();
|
165 |
+
|
166 |
+
// These are called at startup by protocol-compiler-generated code to
|
167 |
+
// register known extensions. The registrations are used by ParseField()
|
168 |
+
// to look up extensions for parsed field numbers. Note that dynamic parsing
|
169 |
+
// does not use ParseField(); only protocol-compiler-generated parsing
|
170 |
+
// methods do.
|
171 |
+
static void RegisterExtension(const MessageLite* containing_type,
|
172 |
+
int number, FieldType type,
|
173 |
+
bool is_repeated, bool is_packed);
|
174 |
+
static void RegisterEnumExtension(const MessageLite* containing_type,
|
175 |
+
int number, FieldType type,
|
176 |
+
bool is_repeated, bool is_packed,
|
177 |
+
EnumValidityFunc* is_valid);
|
178 |
+
static void RegisterMessageExtension(const MessageLite* containing_type,
|
179 |
+
int number, FieldType type,
|
180 |
+
bool is_repeated, bool is_packed,
|
181 |
+
const MessageLite* prototype);
|
182 |
+
|
183 |
+
// =================================================================
|
184 |
+
|
185 |
+
// Add all fields which are currently present to the given vector. This
|
186 |
+
// is useful to implement Reflection::ListFields().
|
187 |
+
void AppendToList(const Descriptor* containing_type,
|
188 |
+
const DescriptorPool* pool,
|
189 |
+
std::vector<const FieldDescriptor*>* output) const;
|
190 |
+
|
191 |
+
// =================================================================
|
192 |
+
// Accessors
|
193 |
+
//
|
194 |
+
// Generated message classes include type-safe templated wrappers around
|
195 |
+
// these methods. Generally you should use those rather than call these
|
196 |
+
// directly, unless you are doing low-level memory management.
|
197 |
+
//
|
198 |
+
// When calling any of these accessors, the extension number requested
|
199 |
+
// MUST exist in the DescriptorPool provided to the constructor. Otherwise,
|
200 |
+
// the method will fail an assert. Normally, though, you would not call
|
201 |
+
// these directly; you would either call the generated accessors of your
|
202 |
+
// message class (e.g. GetExtension()) or you would call the accessors
|
203 |
+
// of the reflection interface. In both cases, it is impossible to
|
204 |
+
// trigger this assert failure: the generated accessors only accept
|
205 |
+
// linked-in extension types as parameters, while the Reflection interface
|
206 |
+
// requires you to provide the FieldDescriptor describing the extension.
|
207 |
+
//
|
208 |
+
// When calling any of these accessors, a protocol-compiler-generated
|
209 |
+
// implementation of the extension corresponding to the number MUST
|
210 |
+
// be linked in, and the FieldDescriptor used to refer to it MUST be
|
211 |
+
// the one generated by that linked-in code. Otherwise, the method will
|
212 |
+
// die on an assert failure. The message objects returned by the message
|
213 |
+
// accessors are guaranteed to be of the correct linked-in type.
|
214 |
+
//
|
215 |
+
// These methods pretty much match Reflection except that:
|
216 |
+
// - They're not virtual.
|
217 |
+
// - They identify fields by number rather than FieldDescriptors.
|
218 |
+
// - They identify enum values using integers rather than descriptors.
|
219 |
+
// - Strings provide Mutable() in addition to Set() accessors.
|
220 |
+
|
221 |
+
bool Has(int number) const;
|
222 |
+
int ExtensionSize(int number) const; // Size of a repeated extension.
|
223 |
+
int NumExtensions() const; // The number of extensions
|
224 |
+
FieldType ExtensionType(int number) const;
|
225 |
+
void ClearExtension(int number);
|
226 |
+
|
227 |
+
// singular fields -------------------------------------------------
|
228 |
+
|
229 |
+
int32 GetInt32 (int number, int32 default_value) const;
|
230 |
+
int64 GetInt64 (int number, int64 default_value) const;
|
231 |
+
uint32 GetUInt32(int number, uint32 default_value) const;
|
232 |
+
uint64 GetUInt64(int number, uint64 default_value) const;
|
233 |
+
float GetFloat (int number, float default_value) const;
|
234 |
+
double GetDouble(int number, double default_value) const;
|
235 |
+
bool GetBool (int number, bool default_value) const;
|
236 |
+
int GetEnum (int number, int default_value) const;
|
237 |
+
const string & GetString (int number, const string& default_value) const;
|
238 |
+
const MessageLite& GetMessage(int number,
|
239 |
+
const MessageLite& default_value) const;
|
240 |
+
const MessageLite& GetMessage(int number, const Descriptor* message_type,
|
241 |
+
MessageFactory* factory) const;
|
242 |
+
|
243 |
+
// |descriptor| may be NULL so long as it is known that the descriptor for
|
244 |
+
// the extension lives in the same pool as the descriptor for the containing
|
245 |
+
// type.
|
246 |
+
#define desc const FieldDescriptor* descriptor // avoid line wrapping
|
247 |
+
void SetInt32 (int number, FieldType type, int32 value, desc);
|
248 |
+
void SetInt64 (int number, FieldType type, int64 value, desc);
|
249 |
+
void SetUInt32(int number, FieldType type, uint32 value, desc);
|
250 |
+
void SetUInt64(int number, FieldType type, uint64 value, desc);
|
251 |
+
void SetFloat (int number, FieldType type, float value, desc);
|
252 |
+
void SetDouble(int number, FieldType type, double value, desc);
|
253 |
+
void SetBool (int number, FieldType type, bool value, desc);
|
254 |
+
void SetEnum (int number, FieldType type, int value, desc);
|
255 |
+
void SetString(int number, FieldType type, const string& value, desc);
|
256 |
+
string * MutableString (int number, FieldType type, desc);
|
257 |
+
MessageLite* MutableMessage(int number, FieldType type,
|
258 |
+
const MessageLite& prototype, desc);
|
259 |
+
MessageLite* MutableMessage(const FieldDescriptor* decsriptor,
|
260 |
+
MessageFactory* factory);
|
261 |
+
// Adds the given message to the ExtensionSet, taking ownership of the
|
262 |
+
// message object. Existing message with the same number will be deleted.
|
263 |
+
// If "message" is NULL, this is equivalent to "ClearExtension(number)".
|
264 |
+
void SetAllocatedMessage(int number, FieldType type,
|
265 |
+
const FieldDescriptor* descriptor,
|
266 |
+
MessageLite* message);
|
267 |
+
void UnsafeArenaSetAllocatedMessage(int number, FieldType type,
|
268 |
+
const FieldDescriptor* descriptor,
|
269 |
+
MessageLite* message);
|
270 |
+
MessageLite* ReleaseMessage(int number, const MessageLite& prototype);
|
271 |
+
MessageLite* UnsafeArenaReleaseMessage(
|
272 |
+
int number, const MessageLite& prototype);
|
273 |
+
|
274 |
+
MessageLite* ReleaseMessage(const FieldDescriptor* descriptor,
|
275 |
+
MessageFactory* factory);
|
276 |
+
MessageLite* UnsafeArenaReleaseMessage(const FieldDescriptor* descriptor,
|
277 |
+
MessageFactory* factory);
|
278 |
+
#undef desc
|
279 |
+
::google::protobuf::Arena* GetArenaNoVirtual() const { return arena_; }
|
280 |
+
|
281 |
+
// repeated fields -------------------------------------------------
|
282 |
+
|
283 |
+
// Fetches a RepeatedField extension by number; returns |default_value|
|
284 |
+
// if no such extension exists. User should not touch this directly; it is
|
285 |
+
// used by the GetRepeatedExtension() method.
|
286 |
+
const void* GetRawRepeatedField(int number, const void* default_value) const;
|
287 |
+
// Fetches a mutable version of a RepeatedField extension by number,
|
288 |
+
// instantiating one if none exists. Similar to above, user should not use
|
289 |
+
// this directly; it underlies MutableRepeatedExtension().
|
290 |
+
void* MutableRawRepeatedField(int number, FieldType field_type,
|
291 |
+
bool packed, const FieldDescriptor* desc);
|
292 |
+
|
293 |
+
// This is an overload of MutableRawRepeatedField to maintain compatibility
|
294 |
+
// with old code using a previous API. This version of
|
295 |
+
// MutableRawRepeatedField() will GOOGLE_CHECK-fail on a missing extension.
|
296 |
+
// (E.g.: borg/clients/internal/proto1/proto2_reflection.cc.)
|
297 |
+
void* MutableRawRepeatedField(int number);
|
298 |
+
|
299 |
+
int32 GetRepeatedInt32 (int number, int index) const;
|
300 |
+
int64 GetRepeatedInt64 (int number, int index) const;
|
301 |
+
uint32 GetRepeatedUInt32(int number, int index) const;
|
302 |
+
uint64 GetRepeatedUInt64(int number, int index) const;
|
303 |
+
float GetRepeatedFloat (int number, int index) const;
|
304 |
+
double GetRepeatedDouble(int number, int index) const;
|
305 |
+
bool GetRepeatedBool (int number, int index) const;
|
306 |
+
int GetRepeatedEnum (int number, int index) const;
|
307 |
+
const string & GetRepeatedString (int number, int index) const;
|
308 |
+
const MessageLite& GetRepeatedMessage(int number, int index) const;
|
309 |
+
|
310 |
+
void SetRepeatedInt32 (int number, int index, int32 value);
|
311 |
+
void SetRepeatedInt64 (int number, int index, int64 value);
|
312 |
+
void SetRepeatedUInt32(int number, int index, uint32 value);
|
313 |
+
void SetRepeatedUInt64(int number, int index, uint64 value);
|
314 |
+
void SetRepeatedFloat (int number, int index, float value);
|
315 |
+
void SetRepeatedDouble(int number, int index, double value);
|
316 |
+
void SetRepeatedBool (int number, int index, bool value);
|
317 |
+
void SetRepeatedEnum (int number, int index, int value);
|
318 |
+
void SetRepeatedString(int number, int index, const string& value);
|
319 |
+
string * MutableRepeatedString (int number, int index);
|
320 |
+
MessageLite* MutableRepeatedMessage(int number, int index);
|
321 |
+
|
322 |
+
#define desc const FieldDescriptor* descriptor // avoid line wrapping
|
323 |
+
void AddInt32 (int number, FieldType type, bool packed, int32 value, desc);
|
324 |
+
void AddInt64 (int number, FieldType type, bool packed, int64 value, desc);
|
325 |
+
void AddUInt32(int number, FieldType type, bool packed, uint32 value, desc);
|
326 |
+
void AddUInt64(int number, FieldType type, bool packed, uint64 value, desc);
|
327 |
+
void AddFloat (int number, FieldType type, bool packed, float value, desc);
|
328 |
+
void AddDouble(int number, FieldType type, bool packed, double value, desc);
|
329 |
+
void AddBool (int number, FieldType type, bool packed, bool value, desc);
|
330 |
+
void AddEnum (int number, FieldType type, bool packed, int value, desc);
|
331 |
+
void AddString(int number, FieldType type, const string& value, desc);
|
332 |
+
string * AddString (int number, FieldType type, desc);
|
333 |
+
MessageLite* AddMessage(int number, FieldType type,
|
334 |
+
const MessageLite& prototype, desc);
|
335 |
+
MessageLite* AddMessage(const FieldDescriptor* descriptor,
|
336 |
+
MessageFactory* factory);
|
337 |
+
void AddAllocatedMessage(const FieldDescriptor* descriptor,
|
338 |
+
MessageLite* new_entry);
|
339 |
+
#undef desc
|
340 |
+
|
341 |
+
void RemoveLast(int number);
|
342 |
+
MessageLite* ReleaseLast(int number);
|
343 |
+
void SwapElements(int number, int index1, int index2);
|
344 |
+
|
345 |
+
// -----------------------------------------------------------------
|
346 |
+
// TODO(kenton): Hardcore memory management accessors
|
347 |
+
|
348 |
+
// =================================================================
|
349 |
+
// convenience methods for implementing methods of Message
|
350 |
+
//
|
351 |
+
// These could all be implemented in terms of the other methods of this
|
352 |
+
// class, but providing them here helps keep the generated code size down.
|
353 |
+
|
354 |
+
void Clear();
|
355 |
+
void MergeFrom(const ExtensionSet& other);
|
356 |
+
void Swap(ExtensionSet* other);
|
357 |
+
void SwapExtension(ExtensionSet* other, int number);
|
358 |
+
bool IsInitialized() const;
|
359 |
+
|
360 |
+
// Parses a single extension from the input. The input should start out
|
361 |
+
// positioned immediately after the tag.
|
362 |
+
bool ParseField(uint32 tag, io::CodedInputStream* input,
|
363 |
+
ExtensionFinder* extension_finder,
|
364 |
+
FieldSkipper* field_skipper);
|
365 |
+
|
366 |
+
// Specific versions for lite or full messages (constructs the appropriate
|
367 |
+
// FieldSkipper automatically). |containing_type| is the default
|
368 |
+
// instance for the containing message; it is used only to look up the
|
369 |
+
// extension by number. See RegisterExtension(), above. Unlike the other
|
370 |
+
// methods of ExtensionSet, this only works for generated message types --
|
371 |
+
// it looks up extensions registered using RegisterExtension().
|
372 |
+
bool ParseField(uint32 tag, io::CodedInputStream* input,
|
373 |
+
const MessageLite* containing_type);
|
374 |
+
bool ParseField(uint32 tag, io::CodedInputStream* input,
|
375 |
+
const Message* containing_type,
|
376 |
+
UnknownFieldSet* unknown_fields);
|
377 |
+
bool ParseField(uint32 tag, io::CodedInputStream* input,
|
378 |
+
const MessageLite* containing_type,
|
379 |
+
io::CodedOutputStream* unknown_fields);
|
380 |
+
|
381 |
+
// Parse an entire message in MessageSet format. Such messages have no
|
382 |
+
// fields, only extensions.
|
383 |
+
bool ParseMessageSet(io::CodedInputStream* input,
|
384 |
+
ExtensionFinder* extension_finder,
|
385 |
+
MessageSetFieldSkipper* field_skipper);
|
386 |
+
|
387 |
+
// Specific versions for lite or full messages (constructs the appropriate
|
388 |
+
// FieldSkipper automatically).
|
389 |
+
bool ParseMessageSet(io::CodedInputStream* input,
|
390 |
+
const MessageLite* containing_type);
|
391 |
+
bool ParseMessageSet(io::CodedInputStream* input,
|
392 |
+
const Message* containing_type,
|
393 |
+
UnknownFieldSet* unknown_fields);
|
394 |
+
|
395 |
+
// Write all extension fields with field numbers in the range
|
396 |
+
// [start_field_number, end_field_number)
|
397 |
+
// to the output stream, using the cached sizes computed when ByteSize() was
|
398 |
+
// last called. Note that the range bounds are inclusive-exclusive.
|
399 |
+
void SerializeWithCachedSizes(int start_field_number,
|
400 |
+
int end_field_number,
|
401 |
+
io::CodedOutputStream* output) const;
|
402 |
+
|
403 |
+
// Same as SerializeWithCachedSizes, but without any bounds checking.
|
404 |
+
// The caller must ensure that target has sufficient capacity for the
|
405 |
+
// serialized extensions.
|
406 |
+
//
|
407 |
+
// Returns a pointer past the last written byte.
|
408 |
+
uint8* InternalSerializeWithCachedSizesToArray(int start_field_number,
|
409 |
+
int end_field_number,
|
410 |
+
bool deterministic,
|
411 |
+
uint8* target) const;
|
412 |
+
|
413 |
+
// Like above but serializes in MessageSet format.
|
414 |
+
void SerializeMessageSetWithCachedSizes(io::CodedOutputStream* output) const;
|
415 |
+
uint8* InternalSerializeMessageSetWithCachedSizesToArray(bool deterministic,
|
416 |
+
uint8* target) const;
|
417 |
+
|
418 |
+
// For backward-compatibility, versions of two of the above methods that
|
419 |
+
// serialize deterministically iff SetDefaultSerializationDeterministic()
|
420 |
+
// has been called.
|
421 |
+
uint8* SerializeWithCachedSizesToArray(int start_field_number,
|
422 |
+
int end_field_number,
|
423 |
+
uint8* target) const;
|
424 |
+
uint8* SerializeMessageSetWithCachedSizesToArray(uint8* target) const;
|
425 |
+
|
426 |
+
// Returns the total serialized size of all the extensions.
|
427 |
+
size_t ByteSize() const;
|
428 |
+
|
429 |
+
// Like ByteSize() but uses MessageSet format.
|
430 |
+
size_t MessageSetByteSize() const;
|
431 |
+
|
432 |
+
// Returns (an estimate of) the total number of bytes used for storing the
|
433 |
+
// extensions in memory, excluding sizeof(*this). If the ExtensionSet is
|
434 |
+
// for a lite message (and thus possibly contains lite messages), the results
|
435 |
+
// are undefined (might work, might crash, might corrupt data, might not even
|
436 |
+
// be linked in). It's up to the protocol compiler to avoid calling this on
|
437 |
+
// such ExtensionSets (easy enough since lite messages don't implement
|
438 |
+
// SpaceUsed()).
|
439 |
+
size_t SpaceUsedExcludingSelfLong() const;
|
440 |
+
|
441 |
+
// This method just calls SpaceUsedExcludingSelfLong() but it can not be
|
442 |
+
// inlined because the definition of SpaceUsedExcludingSelfLong() is not
|
443 |
+
// included in lite runtime and when an inline method refers to it MSVC
|
444 |
+
// will complain about unresolved symbols when building the lite runtime
|
445 |
+
// as .dll.
|
446 |
+
int SpaceUsedExcludingSelf() const;
|
447 |
+
|
448 |
+
private:
|
449 |
+
|
450 |
+
// Interface of a lazily parsed singular message extension.
|
451 |
+
class LIBPROTOBUF_EXPORT LazyMessageExtension {
|
452 |
+
public:
|
453 |
+
LazyMessageExtension() {}
|
454 |
+
virtual ~LazyMessageExtension() {}
|
455 |
+
|
456 |
+
virtual LazyMessageExtension* New(::google::protobuf::Arena* arena) const = 0;
|
457 |
+
virtual const MessageLite& GetMessage(
|
458 |
+
const MessageLite& prototype) const = 0;
|
459 |
+
virtual MessageLite* MutableMessage(const MessageLite& prototype) = 0;
|
460 |
+
virtual void SetAllocatedMessage(MessageLite *message) = 0;
|
461 |
+
virtual void UnsafeArenaSetAllocatedMessage(MessageLite *message) = 0;
|
462 |
+
virtual MessageLite* ReleaseMessage(const MessageLite& prototype) = 0;
|
463 |
+
virtual MessageLite* UnsafeArenaReleaseMessage(
|
464 |
+
const MessageLite& prototype) = 0;
|
465 |
+
|
466 |
+
virtual bool IsInitialized() const = 0;
|
467 |
+
|
468 |
+
PROTOBUF_RUNTIME_DEPRECATED("Please use ByteSizeLong() instead")
|
469 |
+
virtual int ByteSize() const {
|
470 |
+
return internal::ToIntSize(ByteSizeLong());
|
471 |
+
}
|
472 |
+
virtual size_t ByteSizeLong() const = 0;
|
473 |
+
virtual size_t SpaceUsedLong() const = 0;
|
474 |
+
|
475 |
+
virtual void MergeFrom(const LazyMessageExtension& other) = 0;
|
476 |
+
virtual void Clear() = 0;
|
477 |
+
|
478 |
+
virtual bool ReadMessage(const MessageLite& prototype,
|
479 |
+
io::CodedInputStream* input) = 0;
|
480 |
+
virtual void WriteMessage(int number,
|
481 |
+
io::CodedOutputStream* output) const = 0;
|
482 |
+
virtual uint8* WriteMessageToArray(int number, uint8* target) const = 0;
|
483 |
+
virtual uint8* InternalWriteMessageToArray(int number, bool,
|
484 |
+
uint8* target) const {
|
485 |
+
// TODO(gpike): make this pure virtual. This is a placeholder because we
|
486 |
+
// need to update third_party/upb, for example.
|
487 |
+
return WriteMessageToArray(number, target);
|
488 |
+
}
|
489 |
+
|
490 |
+
private:
|
491 |
+
virtual void UnusedKeyMethod(); // Dummy key method to avoid weak vtable.
|
492 |
+
|
493 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(LazyMessageExtension);
|
494 |
+
};
|
495 |
+
struct Extension {
|
496 |
+
// The order of these fields packs Extension into 24 bytes when using 8
|
497 |
+
// byte alignment. Consider this when adding or removing fields here.
|
498 |
+
union {
|
499 |
+
int32 int32_value;
|
500 |
+
int64 int64_value;
|
501 |
+
uint32 uint32_value;
|
502 |
+
uint64 uint64_value;
|
503 |
+
float float_value;
|
504 |
+
double double_value;
|
505 |
+
bool bool_value;
|
506 |
+
int enum_value;
|
507 |
+
string* string_value;
|
508 |
+
MessageLite* message_value;
|
509 |
+
LazyMessageExtension* lazymessage_value;
|
510 |
+
|
511 |
+
RepeatedField <int32 >* repeated_int32_value;
|
512 |
+
RepeatedField <int64 >* repeated_int64_value;
|
513 |
+
RepeatedField <uint32 >* repeated_uint32_value;
|
514 |
+
RepeatedField <uint64 >* repeated_uint64_value;
|
515 |
+
RepeatedField <float >* repeated_float_value;
|
516 |
+
RepeatedField <double >* repeated_double_value;
|
517 |
+
RepeatedField <bool >* repeated_bool_value;
|
518 |
+
RepeatedField <int >* repeated_enum_value;
|
519 |
+
RepeatedPtrField<string >* repeated_string_value;
|
520 |
+
RepeatedPtrField<MessageLite>* repeated_message_value;
|
521 |
+
};
|
522 |
+
|
523 |
+
FieldType type;
|
524 |
+
bool is_repeated;
|
525 |
+
|
526 |
+
// For singular types, indicates if the extension is "cleared". This
|
527 |
+
// happens when an extension is set and then later cleared by the caller.
|
528 |
+
// We want to keep the Extension object around for reuse, so instead of
|
529 |
+
// removing it from the map, we just set is_cleared = true. This has no
|
530 |
+
// meaning for repeated types; for those, the size of the RepeatedField
|
531 |
+
// simply becomes zero when cleared.
|
532 |
+
bool is_cleared : 4;
|
533 |
+
|
534 |
+
// For singular message types, indicates whether lazy parsing is enabled
|
535 |
+
// for this extension. This field is only valid when type == TYPE_MESSAGE
|
536 |
+
// and !is_repeated because we only support lazy parsing for singular
|
537 |
+
// message types currently. If is_lazy = true, the extension is stored in
|
538 |
+
// lazymessage_value. Otherwise, the extension will be message_value.
|
539 |
+
bool is_lazy : 4;
|
540 |
+
|
541 |
+
// For repeated types, this indicates if the [packed=true] option is set.
|
542 |
+
bool is_packed;
|
543 |
+
|
544 |
+
// For packed fields, the size of the packed data is recorded here when
|
545 |
+
// ByteSize() is called then used during serialization.
|
546 |
+
// TODO(kenton): Use atomic<int> when C++ supports it.
|
547 |
+
mutable int cached_size;
|
548 |
+
|
549 |
+
// The descriptor for this extension, if one exists and is known. May be
|
550 |
+
// NULL. Must not be NULL if the descriptor for the extension does not
|
551 |
+
// live in the same pool as the descriptor for the containing type.
|
552 |
+
const FieldDescriptor* descriptor;
|
553 |
+
|
554 |
+
// Some helper methods for operations on a single Extension.
|
555 |
+
void SerializeFieldWithCachedSizes(
|
556 |
+
int number,
|
557 |
+
io::CodedOutputStream* output) const;
|
558 |
+
uint8* InternalSerializeFieldWithCachedSizesToArray(
|
559 |
+
int number,
|
560 |
+
bool deterministic,
|
561 |
+
uint8* target) const;
|
562 |
+
void SerializeMessageSetItemWithCachedSizes(
|
563 |
+
int number,
|
564 |
+
io::CodedOutputStream* output) const;
|
565 |
+
uint8* InternalSerializeMessageSetItemWithCachedSizesToArray(
|
566 |
+
int number,
|
567 |
+
bool deterministic,
|
568 |
+
uint8* target) const;
|
569 |
+
size_t ByteSize(int number) const;
|
570 |
+
size_t MessageSetItemByteSize(int number) const;
|
571 |
+
void Clear();
|
572 |
+
int GetSize() const;
|
573 |
+
void Free();
|
574 |
+
size_t SpaceUsedExcludingSelfLong() const;
|
575 |
+
bool IsInitialized() const;
|
576 |
+
};
|
577 |
+
|
578 |
+
// The Extension struct is small enough to be passed by value, so we use it
|
579 |
+
// directly as the value type in mappings rather than use pointers. We use
|
580 |
+
// sorted maps rather than hash-maps because we expect most ExtensionSets will
|
581 |
+
// only contain a small number of extension. Also, we want AppendToList and
|
582 |
+
// deterministic serialization to order fields by field number.
|
583 |
+
|
584 |
+
struct KeyValue {
|
585 |
+
int first;
|
586 |
+
Extension second;
|
587 |
+
|
588 |
+
struct FirstComparator {
|
589 |
+
bool operator()(const KeyValue& lhs, const KeyValue& rhs) const {
|
590 |
+
return lhs.first < rhs.first;
|
591 |
+
}
|
592 |
+
bool operator()(const KeyValue& lhs, int key) const {
|
593 |
+
return lhs.first < key;
|
594 |
+
}
|
595 |
+
bool operator()(int key, const KeyValue& rhs) const {
|
596 |
+
return key < rhs.first;
|
597 |
+
}
|
598 |
+
};
|
599 |
+
};
|
600 |
+
|
601 |
+
typedef std::map<int, Extension> LargeMap;
|
602 |
+
|
603 |
+
// Wrapper API that switches between flat-map and LargeMap.
|
604 |
+
|
605 |
+
// Finds a key (if present) in the ExtensionSet.
|
606 |
+
const Extension* FindOrNull(int key) const;
|
607 |
+
Extension* FindOrNull(int key);
|
608 |
+
|
609 |
+
// Helper-functions that only inspect the LargeMap.
|
610 |
+
const Extension* FindOrNullInLargeMap(int key) const;
|
611 |
+
Extension* FindOrNullInLargeMap(int key);
|
612 |
+
|
613 |
+
// Inserts a new (key, Extension) into the ExtensionSet (and returns true), or
|
614 |
+
// finds the already-existing Extension for that key (returns false).
|
615 |
+
// The Extension* will point to the new-or-found Extension.
|
616 |
+
std::pair<Extension*, bool> Insert(int key);
|
617 |
+
|
618 |
+
// Grows the flat_capacity_.
|
619 |
+
// If flat_capacity_ > kMaximumFlatCapacity, converts to LargeMap.
|
620 |
+
void GrowCapacity(size_t minimum_new_capacity);
|
621 |
+
static constexpr uint16 kMaximumFlatCapacity = 256;
|
622 |
+
bool is_large() const { return flat_capacity_ > kMaximumFlatCapacity; }
|
623 |
+
|
624 |
+
// Removes a key from the ExtensionSet.
|
625 |
+
void Erase(int key);
|
626 |
+
|
627 |
+
size_t Size() const {
|
628 |
+
return GOOGLE_PREDICT_FALSE(is_large()) ? map_.large->size() : flat_size_;
|
629 |
+
}
|
630 |
+
|
631 |
+
// Similar to std::for_each.
|
632 |
+
// Each Iterator is decomposed into ->first and ->second fields, so
|
633 |
+
// that the KeyValueFunctor can be agnostic vis-a-vis KeyValue-vs-std::pair.
|
634 |
+
template <typename Iterator, typename KeyValueFunctor>
|
635 |
+
static KeyValueFunctor ForEach(Iterator begin, Iterator end,
|
636 |
+
KeyValueFunctor func) {
|
637 |
+
for (Iterator it = begin; it != end; ++it) func(it->first, it->second);
|
638 |
+
return std::move(func);
|
639 |
+
}
|
640 |
+
|
641 |
+
// Applies a functor to the <int, Extension&> pairs in sorted order.
|
642 |
+
template <typename KeyValueFunctor>
|
643 |
+
KeyValueFunctor ForEach(KeyValueFunctor func) {
|
644 |
+
if (GOOGLE_PREDICT_FALSE(is_large())) {
|
645 |
+
return ForEach(map_.large->begin(), map_.large->end(), std::move(func));
|
646 |
+
}
|
647 |
+
return ForEach(flat_begin(), flat_end(), std::move(func));
|
648 |
+
}
|
649 |
+
|
650 |
+
// Applies a functor to the <int, const Extension&> pairs in sorted order.
|
651 |
+
template <typename KeyValueFunctor>
|
652 |
+
KeyValueFunctor ForEach(KeyValueFunctor func) const {
|
653 |
+
if (GOOGLE_PREDICT_FALSE(is_large())) {
|
654 |
+
return ForEach(map_.large->begin(), map_.large->end(), std::move(func));
|
655 |
+
}
|
656 |
+
return ForEach(flat_begin(), flat_end(), std::move(func));
|
657 |
+
}
|
658 |
+
|
659 |
+
// Merges existing Extension from other_extension
|
660 |
+
void InternalExtensionMergeFrom(int number, const Extension& other_extension);
|
661 |
+
|
662 |
+
// Returns true and fills field_number and extension if extension is found.
|
663 |
+
// Note to support packed repeated field compatibility, it also fills whether
|
664 |
+
// the tag on wire is packed, which can be different from
|
665 |
+
// extension->is_packed (whether packed=true is specified).
|
666 |
+
bool FindExtensionInfoFromTag(uint32 tag, ExtensionFinder* extension_finder,
|
667 |
+
int* field_number, ExtensionInfo* extension,
|
668 |
+
bool* was_packed_on_wire);
|
669 |
+
|
670 |
+
// Returns true and fills extension if extension is found.
|
671 |
+
// Note to support packed repeated field compatibility, it also fills whether
|
672 |
+
// the tag on wire is packed, which can be different from
|
673 |
+
// extension->is_packed (whether packed=true is specified).
|
674 |
+
bool FindExtensionInfoFromFieldNumber(int wire_type, int field_number,
|
675 |
+
ExtensionFinder* extension_finder,
|
676 |
+
ExtensionInfo* extension,
|
677 |
+
bool* was_packed_on_wire);
|
678 |
+
|
679 |
+
// Parses a single extension from the input. The input should start out
|
680 |
+
// positioned immediately after the wire tag. This method is called in
|
681 |
+
// ParseField() after field number and was_packed_on_wire is extracted from
|
682 |
+
// the wire tag and ExtensionInfo is found by the field number.
|
683 |
+
bool ParseFieldWithExtensionInfo(int field_number,
|
684 |
+
bool was_packed_on_wire,
|
685 |
+
const ExtensionInfo& extension,
|
686 |
+
io::CodedInputStream* input,
|
687 |
+
FieldSkipper* field_skipper);
|
688 |
+
|
689 |
+
// Like ParseField(), but this method may parse singular message extensions
|
690 |
+
// lazily depending on the value of FLAGS_eagerly_parse_message_sets.
|
691 |
+
bool ParseFieldMaybeLazily(int wire_type, int field_number,
|
692 |
+
io::CodedInputStream* input,
|
693 |
+
ExtensionFinder* extension_finder,
|
694 |
+
MessageSetFieldSkipper* field_skipper);
|
695 |
+
|
696 |
+
// Gets the extension with the given number, creating it if it does not
|
697 |
+
// already exist. Returns true if the extension did not already exist.
|
698 |
+
bool MaybeNewExtension(int number, const FieldDescriptor* descriptor,
|
699 |
+
Extension** result);
|
700 |
+
|
701 |
+
// Gets the repeated extension for the given descriptor, creating it if
|
702 |
+
// it does not exist.
|
703 |
+
Extension* MaybeNewRepeatedExtension(const FieldDescriptor* descriptor);
|
704 |
+
|
705 |
+
// Parse a single MessageSet item -- called just after the item group start
|
706 |
+
// tag has been read.
|
707 |
+
bool ParseMessageSetItem(io::CodedInputStream* input,
|
708 |
+
ExtensionFinder* extension_finder,
|
709 |
+
MessageSetFieldSkipper* field_skipper);
|
710 |
+
|
711 |
+
// Hack: RepeatedPtrFieldBase declares ExtensionSet as a friend. This
|
712 |
+
// friendship should automatically extend to ExtensionSet::Extension, but
|
713 |
+
// unfortunately some older compilers (e.g. GCC 3.4.4) do not implement this
|
714 |
+
// correctly. So, we must provide helpers for calling methods of that
|
715 |
+
// class.
|
716 |
+
|
717 |
+
// Defined in extension_set_heavy.cc.
|
718 |
+
static inline size_t RepeatedMessage_SpaceUsedExcludingSelfLong(
|
719 |
+
RepeatedPtrFieldBase* field);
|
720 |
+
|
721 |
+
KeyValue* flat_begin() {
|
722 |
+
assert(!is_large());
|
723 |
+
return map_.flat;
|
724 |
+
}
|
725 |
+
const KeyValue* flat_begin() const {
|
726 |
+
assert(!is_large());
|
727 |
+
return map_.flat;
|
728 |
+
}
|
729 |
+
KeyValue* flat_end() {
|
730 |
+
assert(!is_large());
|
731 |
+
return map_.flat + flat_size_;
|
732 |
+
}
|
733 |
+
const KeyValue* flat_end() const {
|
734 |
+
assert(!is_large());
|
735 |
+
return map_.flat + flat_size_;
|
736 |
+
}
|
737 |
+
|
738 |
+
::google::protobuf::Arena* arena_;
|
739 |
+
|
740 |
+
// Manual memory-management:
|
741 |
+
// map_.flat is an allocated array of flat_capacity_ elements.
|
742 |
+
// [map_.flat, map_.flat + flat_size_) is the currently-in-use prefix.
|
743 |
+
uint16 flat_capacity_;
|
744 |
+
uint16 flat_size_;
|
745 |
+
union AllocatedData {
|
746 |
+
KeyValue* flat;
|
747 |
+
|
748 |
+
// If flat_capacity_ > kMaximumFlatCapacity, switch to LargeMap,
|
749 |
+
// which guarantees O(n lg n) CPU but larger constant factors.
|
750 |
+
LargeMap* large;
|
751 |
+
} map_;
|
752 |
+
|
753 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ExtensionSet);
|
754 |
+
};
|
755 |
+
|
756 |
+
// These are just for convenience...
|
757 |
+
inline void ExtensionSet::SetString(int number, FieldType type,
|
758 |
+
const string& value,
|
759 |
+
const FieldDescriptor* descriptor) {
|
760 |
+
MutableString(number, type, descriptor)->assign(value);
|
761 |
+
}
|
762 |
+
inline void ExtensionSet::SetRepeatedString(int number, int index,
|
763 |
+
const string& value) {
|
764 |
+
MutableRepeatedString(number, index)->assign(value);
|
765 |
+
}
|
766 |
+
inline void ExtensionSet::AddString(int number, FieldType type,
|
767 |
+
const string& value,
|
768 |
+
const FieldDescriptor* descriptor) {
|
769 |
+
AddString(number, type, descriptor)->assign(value);
|
770 |
+
}
|
771 |
+
|
772 |
+
// ===================================================================
|
773 |
+
// Glue for generated extension accessors
|
774 |
+
|
775 |
+
// -------------------------------------------------------------------
|
776 |
+
// Template magic
|
777 |
+
|
778 |
+
// First we have a set of classes representing "type traits" for different
|
779 |
+
// field types. A type traits class knows how to implement basic accessors
|
780 |
+
// for extensions of a particular type given an ExtensionSet. The signature
|
781 |
+
// for a type traits class looks like this:
|
782 |
+
//
|
783 |
+
// class TypeTraits {
|
784 |
+
// public:
|
785 |
+
// typedef ? ConstType;
|
786 |
+
// typedef ? MutableType;
|
787 |
+
// // TypeTraits for singular fields and repeated fields will define the
|
788 |
+
// // symbol "Singular" or "Repeated" respectively. These two symbols will
|
789 |
+
// // be used in extension accessors to distinguish between singular
|
790 |
+
// // extensions and repeated extensions. If the TypeTraits for the passed
|
791 |
+
// // in extension doesn't have the expected symbol defined, it means the
|
792 |
+
// // user is passing a repeated extension to a singular accessor, or the
|
793 |
+
// // opposite. In that case the C++ compiler will generate an error
|
794 |
+
// // message "no matching member function" to inform the user.
|
795 |
+
// typedef ? Singular
|
796 |
+
// typedef ? Repeated
|
797 |
+
//
|
798 |
+
// static inline ConstType Get(int number, const ExtensionSet& set);
|
799 |
+
// static inline void Set(int number, ConstType value, ExtensionSet* set);
|
800 |
+
// static inline MutableType Mutable(int number, ExtensionSet* set);
|
801 |
+
//
|
802 |
+
// // Variants for repeated fields.
|
803 |
+
// static inline ConstType Get(int number, const ExtensionSet& set,
|
804 |
+
// int index);
|
805 |
+
// static inline void Set(int number, int index,
|
806 |
+
// ConstType value, ExtensionSet* set);
|
807 |
+
// static inline MutableType Mutable(int number, int index,
|
808 |
+
// ExtensionSet* set);
|
809 |
+
// static inline void Add(int number, ConstType value, ExtensionSet* set);
|
810 |
+
// static inline MutableType Add(int number, ExtensionSet* set);
|
811 |
+
// This is used by the ExtensionIdentifier constructor to register
|
812 |
+
// the extension at dynamic initialization.
|
813 |
+
// template <typename ExtendeeT>
|
814 |
+
// static void Register(int number, FieldType type, bool is_packed);
|
815 |
+
// };
|
816 |
+
//
|
817 |
+
// Not all of these methods make sense for all field types. For example, the
|
818 |
+
// "Mutable" methods only make sense for strings and messages, and the
|
819 |
+
// repeated methods only make sense for repeated types. So, each type
|
820 |
+
// traits class implements only the set of methods from this signature that it
|
821 |
+
// actually supports. This will cause a compiler error if the user tries to
|
822 |
+
// access an extension using a method that doesn't make sense for its type.
|
823 |
+
// For example, if "foo" is an extension of type "optional int32", then if you
|
824 |
+
// try to write code like:
|
825 |
+
// my_message.MutableExtension(foo)
|
826 |
+
// you will get a compile error because PrimitiveTypeTraits<int32> does not
|
827 |
+
// have a "Mutable()" method.
|
828 |
+
|
829 |
+
// -------------------------------------------------------------------
|
830 |
+
// PrimitiveTypeTraits
|
831 |
+
|
832 |
+
// Since the ExtensionSet has different methods for each primitive type,
|
833 |
+
// we must explicitly define the methods of the type traits class for each
|
834 |
+
// known type.
|
835 |
+
template <typename Type>
|
836 |
+
class PrimitiveTypeTraits {
|
837 |
+
public:
|
838 |
+
typedef Type ConstType;
|
839 |
+
typedef Type MutableType;
|
840 |
+
typedef PrimitiveTypeTraits<Type> Singular;
|
841 |
+
|
842 |
+
static inline ConstType Get(int number, const ExtensionSet& set,
|
843 |
+
ConstType default_value);
|
844 |
+
static inline void Set(int number, FieldType field_type,
|
845 |
+
ConstType value, ExtensionSet* set);
|
846 |
+
template <typename ExtendeeT>
|
847 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
848 |
+
ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number,
|
849 |
+
type, false, is_packed);
|
850 |
+
}
|
851 |
+
};
|
852 |
+
|
853 |
+
template <typename Type>
|
854 |
+
class RepeatedPrimitiveTypeTraits {
|
855 |
+
public:
|
856 |
+
typedef Type ConstType;
|
857 |
+
typedef Type MutableType;
|
858 |
+
typedef RepeatedPrimitiveTypeTraits<Type> Repeated;
|
859 |
+
|
860 |
+
typedef RepeatedField<Type> RepeatedFieldType;
|
861 |
+
|
862 |
+
static inline Type Get(int number, const ExtensionSet& set, int index);
|
863 |
+
static inline void Set(int number, int index, Type value, ExtensionSet* set);
|
864 |
+
static inline void Add(int number, FieldType field_type,
|
865 |
+
bool is_packed, Type value, ExtensionSet* set);
|
866 |
+
|
867 |
+
static inline const RepeatedField<ConstType>&
|
868 |
+
GetRepeated(int number, const ExtensionSet& set);
|
869 |
+
static inline RepeatedField<Type>*
|
870 |
+
MutableRepeated(int number, FieldType field_type,
|
871 |
+
bool is_packed, ExtensionSet* set);
|
872 |
+
|
873 |
+
static const RepeatedFieldType* GetDefaultRepeatedField();
|
874 |
+
template <typename ExtendeeT>
|
875 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
876 |
+
ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number,
|
877 |
+
type, true, is_packed);
|
878 |
+
}
|
879 |
+
};
|
880 |
+
|
881 |
+
LIBPROTOBUF_EXPORT extern ProtobufOnceType repeated_primitive_generic_type_traits_once_init_;
|
882 |
+
|
883 |
+
class LIBPROTOBUF_EXPORT RepeatedPrimitiveDefaults {
|
884 |
+
private:
|
885 |
+
template<typename Type> friend class RepeatedPrimitiveTypeTraits;
|
886 |
+
static const RepeatedPrimitiveDefaults* default_instance();
|
887 |
+
RepeatedField<int32> default_repeated_field_int32_;
|
888 |
+
RepeatedField<int64> default_repeated_field_int64_;
|
889 |
+
RepeatedField<uint32> default_repeated_field_uint32_;
|
890 |
+
RepeatedField<uint64> default_repeated_field_uint64_;
|
891 |
+
RepeatedField<double> default_repeated_field_double_;
|
892 |
+
RepeatedField<float> default_repeated_field_float_;
|
893 |
+
RepeatedField<bool> default_repeated_field_bool_;
|
894 |
+
};
|
895 |
+
|
896 |
+
#define PROTOBUF_DEFINE_PRIMITIVE_TYPE(TYPE, METHOD) \
|
897 |
+
template<> inline TYPE PrimitiveTypeTraits<TYPE>::Get( \
|
898 |
+
int number, const ExtensionSet& set, TYPE default_value) { \
|
899 |
+
return set.Get##METHOD(number, default_value); \
|
900 |
+
} \
|
901 |
+
template<> inline void PrimitiveTypeTraits<TYPE>::Set( \
|
902 |
+
int number, FieldType field_type, TYPE value, ExtensionSet* set) { \
|
903 |
+
set->Set##METHOD(number, field_type, value, NULL); \
|
904 |
+
} \
|
905 |
+
\
|
906 |
+
template<> inline TYPE RepeatedPrimitiveTypeTraits<TYPE>::Get( \
|
907 |
+
int number, const ExtensionSet& set, int index) { \
|
908 |
+
return set.GetRepeated##METHOD(number, index); \
|
909 |
+
} \
|
910 |
+
template<> inline void RepeatedPrimitiveTypeTraits<TYPE>::Set( \
|
911 |
+
int number, int index, TYPE value, ExtensionSet* set) { \
|
912 |
+
set->SetRepeated##METHOD(number, index, value); \
|
913 |
+
} \
|
914 |
+
template<> inline void RepeatedPrimitiveTypeTraits<TYPE>::Add( \
|
915 |
+
int number, FieldType field_type, bool is_packed, \
|
916 |
+
TYPE value, ExtensionSet* set) { \
|
917 |
+
set->Add##METHOD(number, field_type, is_packed, value, NULL); \
|
918 |
+
} \
|
919 |
+
template<> inline const RepeatedField<TYPE>* \
|
920 |
+
RepeatedPrimitiveTypeTraits<TYPE>::GetDefaultRepeatedField() { \
|
921 |
+
return &RepeatedPrimitiveDefaults::default_instance() \
|
922 |
+
->default_repeated_field_##TYPE##_; \
|
923 |
+
} \
|
924 |
+
template<> inline const RepeatedField<TYPE>& \
|
925 |
+
RepeatedPrimitiveTypeTraits<TYPE>::GetRepeated(int number, \
|
926 |
+
const ExtensionSet& set) { \
|
927 |
+
return *reinterpret_cast<const RepeatedField<TYPE>*>( \
|
928 |
+
set.GetRawRepeatedField( \
|
929 |
+
number, GetDefaultRepeatedField())); \
|
930 |
+
} \
|
931 |
+
template<> inline RepeatedField<TYPE>* \
|
932 |
+
RepeatedPrimitiveTypeTraits<TYPE>::MutableRepeated(int number, \
|
933 |
+
FieldType field_type, \
|
934 |
+
bool is_packed, \
|
935 |
+
ExtensionSet* set) { \
|
936 |
+
return reinterpret_cast<RepeatedField<TYPE>*>( \
|
937 |
+
set->MutableRawRepeatedField(number, field_type, is_packed, NULL)); \
|
938 |
+
}
|
939 |
+
|
940 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE( int32, Int32)
|
941 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE( int64, Int64)
|
942 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE(uint32, UInt32)
|
943 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE(uint64, UInt64)
|
944 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE( float, Float)
|
945 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE(double, Double)
|
946 |
+
PROTOBUF_DEFINE_PRIMITIVE_TYPE( bool, Bool)
|
947 |
+
|
948 |
+
#undef PROTOBUF_DEFINE_PRIMITIVE_TYPE
|
949 |
+
|
950 |
+
// -------------------------------------------------------------------
|
951 |
+
// StringTypeTraits
|
952 |
+
|
953 |
+
// Strings support both Set() and Mutable().
|
954 |
+
class LIBPROTOBUF_EXPORT StringTypeTraits {
|
955 |
+
public:
|
956 |
+
typedef const string& ConstType;
|
957 |
+
typedef string* MutableType;
|
958 |
+
typedef StringTypeTraits Singular;
|
959 |
+
|
960 |
+
static inline const string& Get(int number, const ExtensionSet& set,
|
961 |
+
ConstType default_value) {
|
962 |
+
return set.GetString(number, default_value);
|
963 |
+
}
|
964 |
+
static inline void Set(int number, FieldType field_type,
|
965 |
+
const string& value, ExtensionSet* set) {
|
966 |
+
set->SetString(number, field_type, value, NULL);
|
967 |
+
}
|
968 |
+
static inline string* Mutable(int number, FieldType field_type,
|
969 |
+
ExtensionSet* set) {
|
970 |
+
return set->MutableString(number, field_type, NULL);
|
971 |
+
}
|
972 |
+
template <typename ExtendeeT>
|
973 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
974 |
+
ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number,
|
975 |
+
type, false, is_packed);
|
976 |
+
}
|
977 |
+
};
|
978 |
+
|
979 |
+
class LIBPROTOBUF_EXPORT RepeatedStringTypeTraits {
|
980 |
+
public:
|
981 |
+
typedef const string& ConstType;
|
982 |
+
typedef string* MutableType;
|
983 |
+
typedef RepeatedStringTypeTraits Repeated;
|
984 |
+
|
985 |
+
typedef RepeatedPtrField<string> RepeatedFieldType;
|
986 |
+
|
987 |
+
static inline const string& Get(int number, const ExtensionSet& set,
|
988 |
+
int index) {
|
989 |
+
return set.GetRepeatedString(number, index);
|
990 |
+
}
|
991 |
+
static inline void Set(int number, int index,
|
992 |
+
const string& value, ExtensionSet* set) {
|
993 |
+
set->SetRepeatedString(number, index, value);
|
994 |
+
}
|
995 |
+
static inline string* Mutable(int number, int index, ExtensionSet* set) {
|
996 |
+
return set->MutableRepeatedString(number, index);
|
997 |
+
}
|
998 |
+
static inline void Add(int number, FieldType field_type,
|
999 |
+
bool /*is_packed*/, const string& value,
|
1000 |
+
ExtensionSet* set) {
|
1001 |
+
set->AddString(number, field_type, value, NULL);
|
1002 |
+
}
|
1003 |
+
static inline string* Add(int number, FieldType field_type,
|
1004 |
+
ExtensionSet* set) {
|
1005 |
+
return set->AddString(number, field_type, NULL);
|
1006 |
+
}
|
1007 |
+
static inline const RepeatedPtrField<string>&
|
1008 |
+
GetRepeated(int number, const ExtensionSet& set) {
|
1009 |
+
return *reinterpret_cast<const RepeatedPtrField<string>*>(
|
1010 |
+
set.GetRawRepeatedField(number, GetDefaultRepeatedField()));
|
1011 |
+
}
|
1012 |
+
|
1013 |
+
static inline RepeatedPtrField<string>*
|
1014 |
+
MutableRepeated(int number, FieldType field_type,
|
1015 |
+
bool is_packed, ExtensionSet* set) {
|
1016 |
+
return reinterpret_cast<RepeatedPtrField<string>*>(
|
1017 |
+
set->MutableRawRepeatedField(number, field_type,
|
1018 |
+
is_packed, NULL));
|
1019 |
+
}
|
1020 |
+
|
1021 |
+
static const RepeatedFieldType* GetDefaultRepeatedField();
|
1022 |
+
|
1023 |
+
template <typename ExtendeeT>
|
1024 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
1025 |
+
ExtensionSet::RegisterExtension(&ExtendeeT::default_instance(), number,
|
1026 |
+
type, true, is_packed);
|
1027 |
+
}
|
1028 |
+
|
1029 |
+
private:
|
1030 |
+
static void InitializeDefaultRepeatedFields();
|
1031 |
+
static void DestroyDefaultRepeatedFields();
|
1032 |
+
};
|
1033 |
+
|
1034 |
+
// -------------------------------------------------------------------
|
1035 |
+
// EnumTypeTraits
|
1036 |
+
|
1037 |
+
// ExtensionSet represents enums using integers internally, so we have to
|
1038 |
+
// static_cast around.
|
1039 |
+
template <typename Type, bool IsValid(int)>
|
1040 |
+
class EnumTypeTraits {
|
1041 |
+
public:
|
1042 |
+
typedef Type ConstType;
|
1043 |
+
typedef Type MutableType;
|
1044 |
+
typedef EnumTypeTraits<Type, IsValid> Singular;
|
1045 |
+
|
1046 |
+
static inline ConstType Get(int number, const ExtensionSet& set,
|
1047 |
+
ConstType default_value) {
|
1048 |
+
return static_cast<Type>(set.GetEnum(number, default_value));
|
1049 |
+
}
|
1050 |
+
static inline void Set(int number, FieldType field_type,
|
1051 |
+
ConstType value, ExtensionSet* set) {
|
1052 |
+
GOOGLE_DCHECK(IsValid(value));
|
1053 |
+
set->SetEnum(number, field_type, value, NULL);
|
1054 |
+
}
|
1055 |
+
template <typename ExtendeeT>
|
1056 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
1057 |
+
ExtensionSet::RegisterEnumExtension(&ExtendeeT::default_instance(), number,
|
1058 |
+
type, false, is_packed, IsValid);
|
1059 |
+
}
|
1060 |
+
};
|
1061 |
+
|
1062 |
+
template <typename Type, bool IsValid(int)>
|
1063 |
+
class RepeatedEnumTypeTraits {
|
1064 |
+
public:
|
1065 |
+
typedef Type ConstType;
|
1066 |
+
typedef Type MutableType;
|
1067 |
+
typedef RepeatedEnumTypeTraits<Type, IsValid> Repeated;
|
1068 |
+
|
1069 |
+
typedef RepeatedField<Type> RepeatedFieldType;
|
1070 |
+
|
1071 |
+
static inline ConstType Get(int number, const ExtensionSet& set, int index) {
|
1072 |
+
return static_cast<Type>(set.GetRepeatedEnum(number, index));
|
1073 |
+
}
|
1074 |
+
static inline void Set(int number, int index,
|
1075 |
+
ConstType value, ExtensionSet* set) {
|
1076 |
+
GOOGLE_DCHECK(IsValid(value));
|
1077 |
+
set->SetRepeatedEnum(number, index, value);
|
1078 |
+
}
|
1079 |
+
static inline void Add(int number, FieldType field_type,
|
1080 |
+
bool is_packed, ConstType value, ExtensionSet* set) {
|
1081 |
+
GOOGLE_DCHECK(IsValid(value));
|
1082 |
+
set->AddEnum(number, field_type, is_packed, value, NULL);
|
1083 |
+
}
|
1084 |
+
static inline const RepeatedField<Type>& GetRepeated(int number,
|
1085 |
+
const ExtensionSet&
|
1086 |
+
set) {
|
1087 |
+
// Hack: the `Extension` struct stores a RepeatedField<int> for enums.
|
1088 |
+
// RepeatedField<int> cannot implicitly convert to RepeatedField<EnumType>
|
1089 |
+
// so we need to do some casting magic. See message.h for similar
|
1090 |
+
// contortions for non-extension fields.
|
1091 |
+
return *reinterpret_cast<const RepeatedField<Type>*>(
|
1092 |
+
set.GetRawRepeatedField(number, GetDefaultRepeatedField()));
|
1093 |
+
}
|
1094 |
+
|
1095 |
+
static inline RepeatedField<Type>* MutableRepeated(int number,
|
1096 |
+
FieldType field_type,
|
1097 |
+
bool is_packed,
|
1098 |
+
ExtensionSet* set) {
|
1099 |
+
return reinterpret_cast<RepeatedField<Type>*>(
|
1100 |
+
set->MutableRawRepeatedField(number, field_type, is_packed, NULL));
|
1101 |
+
}
|
1102 |
+
|
1103 |
+
static const RepeatedFieldType* GetDefaultRepeatedField() {
|
1104 |
+
// Hack: as noted above, repeated enum fields are internally stored as a
|
1105 |
+
// RepeatedField<int>. We need to be able to instantiate global static
|
1106 |
+
// objects to return as default (empty) repeated fields on non-existent
|
1107 |
+
// extensions. We would not be able to know a-priori all of the enum types
|
1108 |
+
// (values of |Type|) to instantiate all of these, so we just re-use int32's
|
1109 |
+
// default repeated field object.
|
1110 |
+
return reinterpret_cast<const RepeatedField<Type>*>(
|
1111 |
+
RepeatedPrimitiveTypeTraits<int32>::GetDefaultRepeatedField());
|
1112 |
+
}
|
1113 |
+
template <typename ExtendeeT>
|
1114 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
1115 |
+
ExtensionSet::RegisterEnumExtension(&ExtendeeT::default_instance(), number,
|
1116 |
+
type, true, is_packed, IsValid);
|
1117 |
+
}
|
1118 |
+
};
|
1119 |
+
|
1120 |
+
// -------------------------------------------------------------------
|
1121 |
+
// MessageTypeTraits
|
1122 |
+
|
1123 |
+
// ExtensionSet guarantees that when manipulating extensions with message
|
1124 |
+
// types, the implementation used will be the compiled-in class representing
|
1125 |
+
// that type. So, we can static_cast down to the exact type we expect.
|
1126 |
+
template <typename Type>
|
1127 |
+
class MessageTypeTraits {
|
1128 |
+
public:
|
1129 |
+
typedef const Type& ConstType;
|
1130 |
+
typedef Type* MutableType;
|
1131 |
+
typedef MessageTypeTraits<Type> Singular;
|
1132 |
+
|
1133 |
+
static inline ConstType Get(int number, const ExtensionSet& set,
|
1134 |
+
ConstType default_value) {
|
1135 |
+
return static_cast<const Type&>(
|
1136 |
+
set.GetMessage(number, default_value));
|
1137 |
+
}
|
1138 |
+
static inline MutableType Mutable(int number, FieldType field_type,
|
1139 |
+
ExtensionSet* set) {
|
1140 |
+
return static_cast<Type*>(
|
1141 |
+
set->MutableMessage(number, field_type, Type::default_instance(), NULL));
|
1142 |
+
}
|
1143 |
+
static inline void SetAllocated(int number, FieldType field_type,
|
1144 |
+
MutableType message, ExtensionSet* set) {
|
1145 |
+
set->SetAllocatedMessage(number, field_type, NULL, message);
|
1146 |
+
}
|
1147 |
+
static inline void UnsafeArenaSetAllocated(int number, FieldType field_type,
|
1148 |
+
MutableType message,
|
1149 |
+
ExtensionSet* set) {
|
1150 |
+
set->UnsafeArenaSetAllocatedMessage(number, field_type, NULL, message);
|
1151 |
+
}
|
1152 |
+
static inline MutableType Release(int number, FieldType /* field_type */,
|
1153 |
+
ExtensionSet* set) {
|
1154 |
+
return static_cast<Type*>(set->ReleaseMessage(
|
1155 |
+
number, Type::default_instance()));
|
1156 |
+
}
|
1157 |
+
static inline MutableType UnsafeArenaRelease(int number,
|
1158 |
+
FieldType /* field_type */,
|
1159 |
+
ExtensionSet* set) {
|
1160 |
+
return static_cast<Type*>(set->UnsafeArenaReleaseMessage(
|
1161 |
+
number, Type::default_instance()));
|
1162 |
+
}
|
1163 |
+
template <typename ExtendeeT>
|
1164 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
1165 |
+
ExtensionSet::RegisterMessageExtension(&ExtendeeT::default_instance(),
|
1166 |
+
number, type, false, is_packed,
|
1167 |
+
&Type::default_instance());
|
1168 |
+
}
|
1169 |
+
};
|
1170 |
+
|
1171 |
+
// forward declaration
|
1172 |
+
class RepeatedMessageGenericTypeTraits;
|
1173 |
+
|
1174 |
+
template <typename Type>
|
1175 |
+
class RepeatedMessageTypeTraits {
|
1176 |
+
public:
|
1177 |
+
typedef const Type& ConstType;
|
1178 |
+
typedef Type* MutableType;
|
1179 |
+
typedef RepeatedMessageTypeTraits<Type> Repeated;
|
1180 |
+
|
1181 |
+
typedef RepeatedPtrField<Type> RepeatedFieldType;
|
1182 |
+
|
1183 |
+
static inline ConstType Get(int number, const ExtensionSet& set, int index) {
|
1184 |
+
return static_cast<const Type&>(set.GetRepeatedMessage(number, index));
|
1185 |
+
}
|
1186 |
+
static inline MutableType Mutable(int number, int index, ExtensionSet* set) {
|
1187 |
+
return static_cast<Type*>(set->MutableRepeatedMessage(number, index));
|
1188 |
+
}
|
1189 |
+
static inline MutableType Add(int number, FieldType field_type,
|
1190 |
+
ExtensionSet* set) {
|
1191 |
+
return static_cast<Type*>(
|
1192 |
+
set->AddMessage(number, field_type, Type::default_instance(), NULL));
|
1193 |
+
}
|
1194 |
+
static inline const RepeatedPtrField<Type>& GetRepeated(int number,
|
1195 |
+
const ExtensionSet&
|
1196 |
+
set) {
|
1197 |
+
// See notes above in RepeatedEnumTypeTraits::GetRepeated(): same
|
1198 |
+
// casting hack applies here, because a RepeatedPtrField<MessageLite>
|
1199 |
+
// cannot naturally become a RepeatedPtrType<Type> even though Type is
|
1200 |
+
// presumably a message. google::protobuf::Message goes through similar contortions
|
1201 |
+
// with a reinterpret_cast<>.
|
1202 |
+
return *reinterpret_cast<const RepeatedPtrField<Type>*>(
|
1203 |
+
set.GetRawRepeatedField(number, GetDefaultRepeatedField()));
|
1204 |
+
}
|
1205 |
+
static inline RepeatedPtrField<Type>* MutableRepeated(int number,
|
1206 |
+
FieldType field_type,
|
1207 |
+
bool is_packed,
|
1208 |
+
ExtensionSet* set) {
|
1209 |
+
return reinterpret_cast<RepeatedPtrField<Type>*>(
|
1210 |
+
set->MutableRawRepeatedField(number, field_type, is_packed, NULL));
|
1211 |
+
}
|
1212 |
+
|
1213 |
+
static const RepeatedFieldType* GetDefaultRepeatedField();
|
1214 |
+
template <typename ExtendeeT>
|
1215 |
+
static void Register(int number, FieldType type, bool is_packed) {
|
1216 |
+
ExtensionSet::RegisterMessageExtension(&ExtendeeT::default_instance(),
|
1217 |
+
number, type, true, is_packed,
|
1218 |
+
&Type::default_instance());
|
1219 |
+
}
|
1220 |
+
};
|
1221 |
+
|
1222 |
+
template<typename Type> inline
|
1223 |
+
const typename RepeatedMessageTypeTraits<Type>::RepeatedFieldType*
|
1224 |
+
RepeatedMessageTypeTraits<Type>::GetDefaultRepeatedField() {
|
1225 |
+
static auto instance = OnShutdownDelete(new RepeatedFieldType);
|
1226 |
+
return instance;
|
1227 |
+
}
|
1228 |
+
|
1229 |
+
// -------------------------------------------------------------------
|
1230 |
+
// ExtensionIdentifier
|
1231 |
+
|
1232 |
+
// This is the type of actual extension objects. E.g. if you have:
|
1233 |
+
// extends Foo with optional int32 bar = 1234;
|
1234 |
+
// then "bar" will be defined in C++ as:
|
1235 |
+
// ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 1, false> bar(1234);
|
1236 |
+
//
|
1237 |
+
// Note that we could, in theory, supply the field number as a template
|
1238 |
+
// parameter, and thus make an instance of ExtensionIdentifier have no
|
1239 |
+
// actual contents. However, if we did that, then using at extension
|
1240 |
+
// identifier would not necessarily cause the compiler to output any sort
|
1241 |
+
// of reference to any symbol defined in the extension's .pb.o file. Some
|
1242 |
+
// linkers will actually drop object files that are not explicitly referenced,
|
1243 |
+
// but that would be bad because it would cause this extension to not be
|
1244 |
+
// registered at static initialization, and therefore using it would crash.
|
1245 |
+
|
1246 |
+
template <typename ExtendeeType, typename TypeTraitsType,
|
1247 |
+
FieldType field_type, bool is_packed>
|
1248 |
+
class ExtensionIdentifier {
|
1249 |
+
public:
|
1250 |
+
typedef TypeTraitsType TypeTraits;
|
1251 |
+
typedef ExtendeeType Extendee;
|
1252 |
+
|
1253 |
+
ExtensionIdentifier(int number, typename TypeTraits::ConstType default_value)
|
1254 |
+
: number_(number), default_value_(default_value) {
|
1255 |
+
Register(number);
|
1256 |
+
}
|
1257 |
+
inline int number() const { return number_; }
|
1258 |
+
typename TypeTraits::ConstType default_value() const {
|
1259 |
+
return default_value_;
|
1260 |
+
}
|
1261 |
+
|
1262 |
+
static void Register(int number) {
|
1263 |
+
TypeTraits::template Register<ExtendeeType>(number, field_type, is_packed);
|
1264 |
+
}
|
1265 |
+
|
1266 |
+
private:
|
1267 |
+
const int number_;
|
1268 |
+
typename TypeTraits::ConstType default_value_;
|
1269 |
+
};
|
1270 |
+
|
1271 |
+
// -------------------------------------------------------------------
|
1272 |
+
// Generated accessors
|
1273 |
+
|
1274 |
+
// This macro should be expanded in the context of a generated type which
|
1275 |
+
// has extensions.
|
1276 |
+
//
|
1277 |
+
// We use "_proto_TypeTraits" as a type name below because "TypeTraits"
|
1278 |
+
// causes problems if the class has a nested message or enum type with that
|
1279 |
+
// name and "_TypeTraits" is technically reserved for the C++ library since
|
1280 |
+
// it starts with an underscore followed by a capital letter.
|
1281 |
+
//
|
1282 |
+
// For similar reason, we use "_field_type" and "_is_packed" as parameter names
|
1283 |
+
// below, so that "field_type" and "is_packed" can be used as field names.
|
1284 |
+
#define GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(CLASSNAME) \
|
1285 |
+
/* Has, Size, Clear */ \
|
1286 |
+
template <typename _proto_TypeTraits, \
|
1287 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1288 |
+
bool _is_packed> \
|
1289 |
+
inline bool HasExtension( \
|
1290 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1291 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \
|
1292 |
+
return _extensions_.Has(id.number()); \
|
1293 |
+
} \
|
1294 |
+
\
|
1295 |
+
template <typename _proto_TypeTraits, \
|
1296 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1297 |
+
bool _is_packed> \
|
1298 |
+
inline void ClearExtension( \
|
1299 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1300 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \
|
1301 |
+
_extensions_.ClearExtension(id.number()); \
|
1302 |
+
} \
|
1303 |
+
\
|
1304 |
+
template <typename _proto_TypeTraits, \
|
1305 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1306 |
+
bool _is_packed> \
|
1307 |
+
inline int ExtensionSize( \
|
1308 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1309 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \
|
1310 |
+
return _extensions_.ExtensionSize(id.number()); \
|
1311 |
+
} \
|
1312 |
+
\
|
1313 |
+
/* Singular accessors */ \
|
1314 |
+
template <typename _proto_TypeTraits, \
|
1315 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1316 |
+
bool _is_packed> \
|
1317 |
+
inline typename _proto_TypeTraits::Singular::ConstType GetExtension( \
|
1318 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1319 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const { \
|
1320 |
+
return _proto_TypeTraits::Get(id.number(), _extensions_, \
|
1321 |
+
id.default_value()); \
|
1322 |
+
} \
|
1323 |
+
\
|
1324 |
+
template <typename _proto_TypeTraits, \
|
1325 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1326 |
+
bool _is_packed> \
|
1327 |
+
inline typename _proto_TypeTraits::Singular::MutableType MutableExtension( \
|
1328 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1329 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \
|
1330 |
+
return _proto_TypeTraits::Mutable(id.number(), _field_type, \
|
1331 |
+
&_extensions_); \
|
1332 |
+
} \
|
1333 |
+
\
|
1334 |
+
template <typename _proto_TypeTraits, \
|
1335 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1336 |
+
bool _is_packed> \
|
1337 |
+
inline void SetExtension( \
|
1338 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1339 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1340 |
+
typename _proto_TypeTraits::Singular::ConstType value) { \
|
1341 |
+
_proto_TypeTraits::Set(id.number(), _field_type, value, &_extensions_); \
|
1342 |
+
} \
|
1343 |
+
\
|
1344 |
+
template <typename _proto_TypeTraits, \
|
1345 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1346 |
+
bool _is_packed> \
|
1347 |
+
inline void SetAllocatedExtension( \
|
1348 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1349 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1350 |
+
typename _proto_TypeTraits::Singular::MutableType value) { \
|
1351 |
+
_proto_TypeTraits::SetAllocated(id.number(), _field_type, \
|
1352 |
+
value, &_extensions_); \
|
1353 |
+
} \
|
1354 |
+
template <typename _proto_TypeTraits, \
|
1355 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1356 |
+
bool _is_packed> \
|
1357 |
+
inline void UnsafeArenaSetAllocatedExtension( \
|
1358 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1359 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1360 |
+
typename _proto_TypeTraits::Singular::MutableType value) { \
|
1361 |
+
_proto_TypeTraits::UnsafeArenaSetAllocated(id.number(), _field_type, \
|
1362 |
+
value, &_extensions_); \
|
1363 |
+
} \
|
1364 |
+
template <typename _proto_TypeTraits, \
|
1365 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1366 |
+
bool _is_packed> \
|
1367 |
+
inline typename _proto_TypeTraits::Singular::MutableType ReleaseExtension( \
|
1368 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1369 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \
|
1370 |
+
return _proto_TypeTraits::Release(id.number(), _field_type, \
|
1371 |
+
&_extensions_); \
|
1372 |
+
} \
|
1373 |
+
template <typename _proto_TypeTraits, \
|
1374 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1375 |
+
bool _is_packed> \
|
1376 |
+
inline typename _proto_TypeTraits::Singular::MutableType \
|
1377 |
+
UnsafeArenaReleaseExtension( \
|
1378 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1379 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \
|
1380 |
+
return _proto_TypeTraits::UnsafeArenaRelease(id.number(), _field_type, \
|
1381 |
+
&_extensions_); \
|
1382 |
+
} \
|
1383 |
+
\
|
1384 |
+
/* Repeated accessors */ \
|
1385 |
+
template <typename _proto_TypeTraits, \
|
1386 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1387 |
+
bool _is_packed> \
|
1388 |
+
inline typename _proto_TypeTraits::Repeated::ConstType GetExtension( \
|
1389 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1390 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1391 |
+
int index) const { \
|
1392 |
+
return _proto_TypeTraits::Get(id.number(), _extensions_, index); \
|
1393 |
+
} \
|
1394 |
+
\
|
1395 |
+
template <typename _proto_TypeTraits, \
|
1396 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1397 |
+
bool _is_packed> \
|
1398 |
+
inline typename _proto_TypeTraits::Repeated::MutableType MutableExtension( \
|
1399 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1400 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1401 |
+
int index) { \
|
1402 |
+
return _proto_TypeTraits::Mutable(id.number(), index, &_extensions_); \
|
1403 |
+
} \
|
1404 |
+
\
|
1405 |
+
template <typename _proto_TypeTraits, \
|
1406 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1407 |
+
bool _is_packed> \
|
1408 |
+
inline void SetExtension( \
|
1409 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1410 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1411 |
+
int index, typename _proto_TypeTraits::Repeated::ConstType value) { \
|
1412 |
+
_proto_TypeTraits::Set(id.number(), index, value, &_extensions_); \
|
1413 |
+
} \
|
1414 |
+
\
|
1415 |
+
template <typename _proto_TypeTraits, \
|
1416 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1417 |
+
bool _is_packed> \
|
1418 |
+
inline typename _proto_TypeTraits::Repeated::MutableType AddExtension( \
|
1419 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1420 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) { \
|
1421 |
+
return _proto_TypeTraits::Add(id.number(), _field_type, &_extensions_); \
|
1422 |
+
} \
|
1423 |
+
\
|
1424 |
+
template <typename _proto_TypeTraits, \
|
1425 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1426 |
+
bool _is_packed> \
|
1427 |
+
inline void AddExtension( \
|
1428 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1429 |
+
CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id, \
|
1430 |
+
typename _proto_TypeTraits::Repeated::ConstType value) { \
|
1431 |
+
_proto_TypeTraits::Add(id.number(), _field_type, _is_packed, \
|
1432 |
+
value, &_extensions_); \
|
1433 |
+
} \
|
1434 |
+
\
|
1435 |
+
template <typename _proto_TypeTraits, \
|
1436 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1437 |
+
bool _is_packed> \
|
1438 |
+
inline const typename _proto_TypeTraits::Repeated::RepeatedFieldType& \
|
1439 |
+
GetRepeatedExtension( \
|
1440 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1441 |
+
CLASSNAME, _proto_TypeTraits, _field_type, \
|
1442 |
+
_is_packed>& id) const { \
|
1443 |
+
return _proto_TypeTraits::GetRepeated(id.number(), _extensions_); \
|
1444 |
+
} \
|
1445 |
+
\
|
1446 |
+
template <typename _proto_TypeTraits, \
|
1447 |
+
::google::protobuf::internal::FieldType _field_type, \
|
1448 |
+
bool _is_packed> \
|
1449 |
+
inline typename _proto_TypeTraits::Repeated::RepeatedFieldType* \
|
1450 |
+
MutableRepeatedExtension( \
|
1451 |
+
const ::google::protobuf::internal::ExtensionIdentifier< \
|
1452 |
+
CLASSNAME, _proto_TypeTraits, _field_type, \
|
1453 |
+
_is_packed>& id) { \
|
1454 |
+
return _proto_TypeTraits::MutableRepeated(id.number(), _field_type, \
|
1455 |
+
_is_packed, &_extensions_); \
|
1456 |
+
}
|
1457 |
+
|
1458 |
+
} // namespace internal
|
1459 |
+
} // namespace protobuf
|
1460 |
+
|
1461 |
+
} // namespace google
|
1462 |
+
#endif // GOOGLE_PROTOBUF_EXTENSION_SET_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_enum_util.h
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__
|
32 |
+
#define GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__
|
33 |
+
|
34 |
+
#include <type_traits>
|
35 |
+
|
36 |
+
namespace google {
|
37 |
+
namespace protobuf {
|
38 |
+
|
39 |
+
// This type trait can be used to cause templates to only match proto2 enum
|
40 |
+
// types.
|
41 |
+
template <typename T> struct is_proto_enum : ::std::false_type {};
|
42 |
+
|
43 |
+
} // namespace protobuf
|
44 |
+
|
45 |
+
} // namespace google
|
46 |
+
#endif // GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven.h
ADDED
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__
|
32 |
+
#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/map.h>
|
35 |
+
#include <google/protobuf/map_entry_lite.h>
|
36 |
+
#include <google/protobuf/map_field_lite.h>
|
37 |
+
#include <google/protobuf/message_lite.h>
|
38 |
+
#include <google/protobuf/wire_format_lite.h>
|
39 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
40 |
+
|
41 |
+
// We require C++11 and Clang to use constexpr for variables, as GCC 4.8
|
42 |
+
// requires constexpr to be consistent between declarations of variables
|
43 |
+
// unnecessarily (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58541).
|
44 |
+
// VS 2017 Update 3 also supports this usage of constexpr.
|
45 |
+
#if defined(__clang__) || (defined(_MSC_VER) && _MSC_VER >= 1911)
|
46 |
+
#define PROTOBUF_CONSTEXPR_VAR constexpr
|
47 |
+
#else // !__clang__
|
48 |
+
#define PROTOBUF_CONSTEXPR_VAR
|
49 |
+
#endif // !_clang
|
50 |
+
|
51 |
+
namespace google {
|
52 |
+
namespace protobuf {
|
53 |
+
namespace internal {
|
54 |
+
|
55 |
+
// Processing-type masks.
|
56 |
+
static constexpr const unsigned char kOneofMask = 0x40;
|
57 |
+
static constexpr const unsigned char kRepeatedMask = 0x20;
|
58 |
+
// Mask for the raw type: either a WireFormatLite::FieldType or one of the
|
59 |
+
// ProcessingTypes below, without the oneof or repeated flag.
|
60 |
+
static constexpr const unsigned char kTypeMask = 0x1f;
|
61 |
+
|
62 |
+
// Wire type masks.
|
63 |
+
static constexpr const unsigned char kNotPackedMask = 0x10;
|
64 |
+
static constexpr const unsigned char kInvalidMask = 0x20;
|
65 |
+
|
66 |
+
enum ProcessingTypes {
|
67 |
+
TYPE_STRING_INLINED = 23,
|
68 |
+
TYPE_BYTES_INLINED = 24,
|
69 |
+
TYPE_MAP = 25,
|
70 |
+
};
|
71 |
+
|
72 |
+
static_assert(TYPE_MAP < kRepeatedMask, "Invalid enum");
|
73 |
+
|
74 |
+
// TODO(ckennelly): Add a static assertion to ensure that these masks do not
|
75 |
+
// conflict with wiretypes.
|
76 |
+
|
77 |
+
// ParseTableField is kept small to help simplify instructions for computing
|
78 |
+
// offsets, as we will always need this information to parse a field.
|
79 |
+
// Additional data, needed for some types, is stored in
|
80 |
+
// AuxillaryParseTableField.
|
81 |
+
struct ParseTableField {
|
82 |
+
uint32 offset;
|
83 |
+
// The presence_index ordinarily represents a has_bit index, but for fields
|
84 |
+
// inside a oneof it represents the index in _oneof_case_.
|
85 |
+
uint32 presence_index;
|
86 |
+
unsigned char normal_wiretype;
|
87 |
+
unsigned char packed_wiretype;
|
88 |
+
|
89 |
+
// processing_type is given by:
|
90 |
+
// (FieldDescriptor->type() << 1) | FieldDescriptor->is_packed()
|
91 |
+
unsigned char processing_type;
|
92 |
+
|
93 |
+
unsigned char tag_size;
|
94 |
+
};
|
95 |
+
|
96 |
+
struct ParseTable;
|
97 |
+
|
98 |
+
union AuxillaryParseTableField {
|
99 |
+
typedef bool (*EnumValidator)(int);
|
100 |
+
|
101 |
+
// Enums
|
102 |
+
struct enum_aux {
|
103 |
+
EnumValidator validator;
|
104 |
+
};
|
105 |
+
enum_aux enums;
|
106 |
+
// Group, messages
|
107 |
+
struct message_aux {
|
108 |
+
// ExplicitlyInitialized<T> -> T requires a reinterpret_cast, which prevents
|
109 |
+
// the tables from being constructed as a constexpr. We use void to avoid
|
110 |
+
// the cast.
|
111 |
+
const void* default_message_void;
|
112 |
+
const MessageLite* default_message() const {
|
113 |
+
return static_cast<const MessageLite*>(default_message_void);
|
114 |
+
}
|
115 |
+
};
|
116 |
+
message_aux messages;
|
117 |
+
// Strings
|
118 |
+
struct string_aux {
|
119 |
+
const void* default_ptr;
|
120 |
+
const char* field_name;
|
121 |
+
};
|
122 |
+
string_aux strings;
|
123 |
+
|
124 |
+
struct map_aux {
|
125 |
+
bool (*parse_map)(io::CodedInputStream*, void*);
|
126 |
+
};
|
127 |
+
map_aux maps;
|
128 |
+
|
129 |
+
AuxillaryParseTableField() = default;
|
130 |
+
constexpr AuxillaryParseTableField(AuxillaryParseTableField::enum_aux e)
|
131 |
+
: enums(e) {}
|
132 |
+
constexpr AuxillaryParseTableField(AuxillaryParseTableField::message_aux m)
|
133 |
+
: messages(m) {}
|
134 |
+
constexpr AuxillaryParseTableField(AuxillaryParseTableField::string_aux s)
|
135 |
+
: strings(s) {}
|
136 |
+
constexpr AuxillaryParseTableField(AuxillaryParseTableField::map_aux m)
|
137 |
+
: maps(m) {}
|
138 |
+
};
|
139 |
+
|
140 |
+
struct ParseTable {
|
141 |
+
const ParseTableField* fields;
|
142 |
+
const AuxillaryParseTableField* aux;
|
143 |
+
int max_field_number;
|
144 |
+
// TODO(ckennelly): Do something with this padding.
|
145 |
+
|
146 |
+
// TODO(ckennelly): Vet these for sign extension.
|
147 |
+
int64 has_bits_offset;
|
148 |
+
int64 oneof_case_offset;
|
149 |
+
int64 extension_offset;
|
150 |
+
int64 arena_offset;
|
151 |
+
|
152 |
+
// ExplicitlyInitialized<T> -> T requires a reinterpret_cast, which prevents
|
153 |
+
// the tables from being constructed as a constexpr. We use void to avoid
|
154 |
+
// the cast.
|
155 |
+
const void* default_instance_void;
|
156 |
+
const MessageLite* default_instance() const {
|
157 |
+
return static_cast<const MessageLite*>(default_instance_void);
|
158 |
+
}
|
159 |
+
|
160 |
+
bool unknown_field_set;
|
161 |
+
};
|
162 |
+
|
163 |
+
static_assert(sizeof(ParseTableField) <= 16, "ParseTableField is too large");
|
164 |
+
// The tables must be composed of POD components to ensure link-time
|
165 |
+
// initialization.
|
166 |
+
static_assert(std::is_pod<ParseTableField>::value, "");
|
167 |
+
static_assert(std::is_pod<AuxillaryParseTableField::enum_aux>::value, "");
|
168 |
+
static_assert(std::is_pod<AuxillaryParseTableField::message_aux>::value, "");
|
169 |
+
static_assert(std::is_pod<AuxillaryParseTableField::string_aux>::value, "");
|
170 |
+
static_assert(std::is_pod<ParseTable>::value, "");
|
171 |
+
|
172 |
+
#ifndef __NVCC__ // This assertion currently fails under NVCC.
|
173 |
+
static_assert(std::is_pod<AuxillaryParseTableField>::value, "");
|
174 |
+
#endif
|
175 |
+
|
176 |
+
// TODO(ckennelly): Consolidate these implementations into a single one, using
|
177 |
+
// dynamic dispatch to the appropriate unknown field handler.
|
178 |
+
bool MergePartialFromCodedStream(MessageLite* msg, const ParseTable& table,
|
179 |
+
io::CodedInputStream* input);
|
180 |
+
bool MergePartialFromCodedStreamLite(MessageLite* msg, const ParseTable& table,
|
181 |
+
io::CodedInputStream* input);
|
182 |
+
|
183 |
+
template <typename Entry>
|
184 |
+
bool ParseMap(io::CodedInputStream* input, void* map_field) {
|
185 |
+
typedef typename MapEntryToMapField<Entry>::MapFieldType MapFieldType;
|
186 |
+
typedef google::protobuf::Map<typename Entry::EntryKeyType,
|
187 |
+
typename Entry::EntryValueType>
|
188 |
+
MapType;
|
189 |
+
typedef typename Entry::template Parser<MapFieldType, MapType> ParserType;
|
190 |
+
|
191 |
+
ParserType parser(static_cast<MapFieldType*>(map_field));
|
192 |
+
return ::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(input,
|
193 |
+
&parser);
|
194 |
+
}
|
195 |
+
|
196 |
+
} // namespace internal
|
197 |
+
} // namespace protobuf
|
198 |
+
|
199 |
+
} // namespace google
|
200 |
+
#endif // GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_table_driven_lite.h
ADDED
@@ -0,0 +1,873 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__
|
32 |
+
#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/generated_message_table_driven.h>
|
35 |
+
|
36 |
+
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
|
37 |
+
#include <google/protobuf/extension_set.h>
|
38 |
+
#include <google/protobuf/implicit_weak_message.h>
|
39 |
+
#include <google/protobuf/inlined_string_field.h>
|
40 |
+
#include <google/protobuf/metadata_lite.h>
|
41 |
+
#include <google/protobuf/repeated_field.h>
|
42 |
+
#include <google/protobuf/wire_format_lite.h>
|
43 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
44 |
+
#include <type_traits>
|
45 |
+
|
46 |
+
|
47 |
+
namespace google {
|
48 |
+
namespace protobuf {
|
49 |
+
namespace internal {
|
50 |
+
|
51 |
+
|
52 |
+
enum StringType {
|
53 |
+
StringType_STRING = 0,
|
54 |
+
StringType_INLINED = 3
|
55 |
+
};
|
56 |
+
|
57 |
+
// Logically a superset of StringType, consisting of all field types that
|
58 |
+
// require special initialization.
|
59 |
+
enum ProcessingType {
|
60 |
+
ProcessingType_STRING = 0,
|
61 |
+
ProcessingType_CORD = 1,
|
62 |
+
ProcessingType_STRING_PIECE = 2,
|
63 |
+
ProcessingType_INLINED = 3,
|
64 |
+
ProcessingType_MESSAGE = 4,
|
65 |
+
};
|
66 |
+
|
67 |
+
enum Cardinality {
|
68 |
+
Cardinality_SINGULAR = 0,
|
69 |
+
Cardinality_REPEATED = 1,
|
70 |
+
Cardinality_ONEOF = 3
|
71 |
+
};
|
72 |
+
|
73 |
+
template <typename Type>
|
74 |
+
inline Type* Raw(MessageLite* msg, int64 offset) {
|
75 |
+
return reinterpret_cast<Type*>(reinterpret_cast<uint8*>(msg) + offset);
|
76 |
+
}
|
77 |
+
|
78 |
+
template <typename Type>
|
79 |
+
inline const Type* Raw(const MessageLite* msg, int64 offset) {
|
80 |
+
return reinterpret_cast<const Type*>(reinterpret_cast<const uint8*>(msg) +
|
81 |
+
offset);
|
82 |
+
}
|
83 |
+
|
84 |
+
template <typename InternalMetadata>
|
85 |
+
inline Arena* GetArena(MessageLite* msg, int64 arena_offset) {
|
86 |
+
if (GOOGLE_PREDICT_FALSE(arena_offset == -1)) {
|
87 |
+
return NULL;
|
88 |
+
}
|
89 |
+
|
90 |
+
return Raw<InternalMetadata>(msg, arena_offset)->arena();
|
91 |
+
}
|
92 |
+
|
93 |
+
inline ExtensionSet* GetExtensionSet(MessageLite* msg, int64 extension_offset) {
|
94 |
+
if (extension_offset == -1) {
|
95 |
+
return NULL;
|
96 |
+
}
|
97 |
+
|
98 |
+
return Raw<ExtensionSet>(msg, extension_offset);
|
99 |
+
}
|
100 |
+
|
101 |
+
template <typename Type>
|
102 |
+
inline Type* AddField(MessageLite* msg, int64 offset) {
|
103 |
+
static_assert(std::is_pod<Type>::value ||
|
104 |
+
std::is_same<Type, InlinedStringField>::value,
|
105 |
+
"Do not assign");
|
106 |
+
|
107 |
+
google::protobuf::RepeatedField<Type>* repeated =
|
108 |
+
Raw<google::protobuf::RepeatedField<Type> >(msg, offset);
|
109 |
+
return repeated->Add();
|
110 |
+
}
|
111 |
+
|
112 |
+
template <>
|
113 |
+
inline string* AddField<string>(MessageLite* msg, int64 offset) {
|
114 |
+
google::protobuf::RepeatedPtrField<string>* repeated =
|
115 |
+
Raw<google::protobuf::RepeatedPtrField<string> >(msg, offset);
|
116 |
+
return repeated->Add();
|
117 |
+
}
|
118 |
+
|
119 |
+
|
120 |
+
template <typename Type>
|
121 |
+
inline void AddField(MessageLite* msg, int64 offset, Type value) {
|
122 |
+
static_assert(std::is_pod<Type>::value,
|
123 |
+
"Do not assign");
|
124 |
+
*AddField<Type>(msg, offset) = value;
|
125 |
+
}
|
126 |
+
|
127 |
+
inline void SetBit(uint32* has_bits, uint32 has_bit_index) {
|
128 |
+
GOOGLE_DCHECK(has_bits != nullptr);
|
129 |
+
|
130 |
+
uint32 mask = static_cast<uint32>(1u) << (has_bit_index % 32);
|
131 |
+
has_bits[has_bit_index / 32u] |= mask;
|
132 |
+
}
|
133 |
+
|
134 |
+
template <typename Type>
|
135 |
+
inline Type* MutableField(MessageLite* msg, uint32* has_bits,
|
136 |
+
uint32 has_bit_index, int64 offset) {
|
137 |
+
SetBit(has_bits, has_bit_index);
|
138 |
+
return Raw<Type>(msg, offset);
|
139 |
+
}
|
140 |
+
|
141 |
+
template <typename Type>
|
142 |
+
inline void SetField(MessageLite* msg, uint32* has_bits, uint32 has_bit_index,
|
143 |
+
int64 offset, Type value) {
|
144 |
+
static_assert(std::is_pod<Type>::value,
|
145 |
+
"Do not assign");
|
146 |
+
*MutableField<Type>(msg, has_bits, has_bit_index, offset) = value;
|
147 |
+
}
|
148 |
+
|
149 |
+
template <typename Type>
|
150 |
+
inline void SetOneofField(MessageLite* msg, uint32* oneof_case,
|
151 |
+
uint32 oneof_case_index, int64 offset,
|
152 |
+
int field_number, Type value) {
|
153 |
+
oneof_case[oneof_case_index] = field_number;
|
154 |
+
*Raw<Type>(msg, offset) = value;
|
155 |
+
}
|
156 |
+
|
157 |
+
// Clears a oneof field. The field argument should correspond to the particular
|
158 |
+
// field that is currently set in the oneof.
|
159 |
+
inline void ClearOneofField(const ParseTableField& field, Arena* arena,
|
160 |
+
MessageLite* msg) {
|
161 |
+
switch (field.processing_type & kTypeMask) {
|
162 |
+
case WireFormatLite::TYPE_MESSAGE:
|
163 |
+
if (arena == NULL) {
|
164 |
+
delete *Raw<MessageLite*>(msg, field.offset);
|
165 |
+
}
|
166 |
+
break;
|
167 |
+
|
168 |
+
case WireFormatLite::TYPE_STRING:
|
169 |
+
case WireFormatLite::TYPE_BYTES:
|
170 |
+
Raw<ArenaStringPtr>(msg, field.offset)
|
171 |
+
->Destroy(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), arena);
|
172 |
+
break;
|
173 |
+
|
174 |
+
case TYPE_STRING_INLINED:
|
175 |
+
case TYPE_BYTES_INLINED:
|
176 |
+
Raw<InlinedStringField>(msg, field.offset)->DestroyNoArena(NULL);
|
177 |
+
break;
|
178 |
+
|
179 |
+
default:
|
180 |
+
// No cleanup needed.
|
181 |
+
break;
|
182 |
+
}
|
183 |
+
}
|
184 |
+
|
185 |
+
// Clears and reinitializes a oneof field as necessary, in preparation for
|
186 |
+
// parsing a new value with type field_type and field number field_number.
|
187 |
+
//
|
188 |
+
// Note: the oneof_case argument should point directly to the _oneof_case_
|
189 |
+
// element corresponding to this particular oneof, not to the beginning of the
|
190 |
+
// _oneof_case_ array.
|
191 |
+
template <ProcessingType field_type>
|
192 |
+
inline void ResetOneofField(const ParseTable& table, int field_number,
|
193 |
+
Arena* arena, MessageLite* msg, uint32* oneof_case,
|
194 |
+
int64 offset, const void* default_ptr) {
|
195 |
+
if (*oneof_case == field_number) {
|
196 |
+
// The oneof is already set to the right type, so there is no need to clear
|
197 |
+
// it.
|
198 |
+
return;
|
199 |
+
}
|
200 |
+
|
201 |
+
if (*oneof_case != 0) {
|
202 |
+
ClearOneofField(table.fields[*oneof_case], arena, msg);
|
203 |
+
}
|
204 |
+
*oneof_case = field_number;
|
205 |
+
|
206 |
+
switch (field_type) {
|
207 |
+
case ProcessingType_STRING:
|
208 |
+
Raw<ArenaStringPtr>(msg, offset)
|
209 |
+
->UnsafeSetDefault(static_cast<const string*>(default_ptr));
|
210 |
+
break;
|
211 |
+
case ProcessingType_INLINED:
|
212 |
+
new (Raw<InlinedStringField>(msg, offset))
|
213 |
+
InlinedStringField(*static_cast<const string*>(default_ptr));
|
214 |
+
break;
|
215 |
+
case ProcessingType_MESSAGE:
|
216 |
+
MessageLite** submessage = Raw<MessageLite*>(msg, offset);
|
217 |
+
const MessageLite* prototype =
|
218 |
+
table.aux[field_number].messages.default_message();
|
219 |
+
*submessage = prototype->New(arena);
|
220 |
+
break;
|
221 |
+
}
|
222 |
+
}
|
223 |
+
|
224 |
+
template <Cardinality cardinality, bool validate, StringType ctype>
|
225 |
+
static inline bool HandleString(io::CodedInputStream* input, MessageLite* msg,
|
226 |
+
Arena* arena, uint32* has_bits,
|
227 |
+
uint32 has_bit_index, int64 offset,
|
228 |
+
const void* default_ptr,
|
229 |
+
const char* field_name) {
|
230 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
231 |
+
const char* sdata;
|
232 |
+
size_t size;
|
233 |
+
#endif
|
234 |
+
|
235 |
+
switch (ctype) {
|
236 |
+
case StringType_INLINED: {
|
237 |
+
InlinedStringField* s;
|
238 |
+
switch (cardinality) {
|
239 |
+
case Cardinality_SINGULAR:
|
240 |
+
// TODO(ckennelly): Is this optimal?
|
241 |
+
s = MutableField<InlinedStringField>(
|
242 |
+
msg, has_bits, has_bit_index, offset);
|
243 |
+
break;
|
244 |
+
case Cardinality_REPEATED:
|
245 |
+
s = AddField<InlinedStringField>(msg, offset);
|
246 |
+
break;
|
247 |
+
case Cardinality_ONEOF:
|
248 |
+
s = Raw<InlinedStringField>(msg, offset);
|
249 |
+
break;
|
250 |
+
}
|
251 |
+
GOOGLE_DCHECK(s != nullptr);
|
252 |
+
::std::string* value = s->MutableNoArena(NULL);
|
253 |
+
|
254 |
+
if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadString(input, value))) {
|
255 |
+
return false;
|
256 |
+
}
|
257 |
+
|
258 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
259 |
+
sdata = value->data();
|
260 |
+
size = value->size();
|
261 |
+
#endif
|
262 |
+
break;
|
263 |
+
}
|
264 |
+
case StringType_STRING: {
|
265 |
+
string* value;
|
266 |
+
switch (cardinality) {
|
267 |
+
case Cardinality_SINGULAR:
|
268 |
+
// TODO(ckennelly): Is this optimal?
|
269 |
+
value =
|
270 |
+
MutableField<ArenaStringPtr>(msg, has_bits, has_bit_index, offset)
|
271 |
+
->Mutable(static_cast<const string*>(default_ptr), arena);
|
272 |
+
break;
|
273 |
+
case Cardinality_REPEATED:
|
274 |
+
value = AddField<string>(msg, offset);
|
275 |
+
break;
|
276 |
+
case Cardinality_ONEOF:
|
277 |
+
value = Raw<ArenaStringPtr>(msg, offset)
|
278 |
+
->Mutable(static_cast<const string*>(default_ptr), arena);
|
279 |
+
break;
|
280 |
+
}
|
281 |
+
GOOGLE_DCHECK(value != nullptr);
|
282 |
+
|
283 |
+
if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadString(input, value))) {
|
284 |
+
return false;
|
285 |
+
}
|
286 |
+
|
287 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
288 |
+
sdata = value->data();
|
289 |
+
size = value->size();
|
290 |
+
#endif
|
291 |
+
break;
|
292 |
+
}
|
293 |
+
}
|
294 |
+
|
295 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
296 |
+
if (validate) {
|
297 |
+
WireFormatLite::VerifyUtf8String(sdata, size, WireFormatLite::PARSE,
|
298 |
+
field_name);
|
299 |
+
}
|
300 |
+
#endif
|
301 |
+
|
302 |
+
return true;
|
303 |
+
}
|
304 |
+
|
305 |
+
template <typename UnknownFieldHandler, typename InternalMetadata,
|
306 |
+
Cardinality cardinality>
|
307 |
+
inline bool HandleEnum(const ParseTable& table, io::CodedInputStream* input,
|
308 |
+
MessageLite* msg, uint32* presence,
|
309 |
+
uint32 presence_index, int64 offset, uint32 tag,
|
310 |
+
int field_number) {
|
311 |
+
int value;
|
312 |
+
if (GOOGLE_PREDICT_FALSE(
|
313 |
+
(!WireFormatLite::ReadPrimitive<int, WireFormatLite::TYPE_ENUM>(
|
314 |
+
input, &value)))) {
|
315 |
+
return false;
|
316 |
+
}
|
317 |
+
|
318 |
+
AuxillaryParseTableField::EnumValidator validator =
|
319 |
+
table.aux[field_number].enums.validator;
|
320 |
+
if (validator(value)) {
|
321 |
+
switch (cardinality) {
|
322 |
+
case Cardinality_SINGULAR:
|
323 |
+
SetField(msg, presence, presence_index, offset, value);
|
324 |
+
break;
|
325 |
+
case Cardinality_REPEATED:
|
326 |
+
AddField(msg, offset, value);
|
327 |
+
break;
|
328 |
+
case Cardinality_ONEOF:
|
329 |
+
ClearOneofField(table.fields[presence[presence_index]],
|
330 |
+
GetArena<InternalMetadata>(msg, table.arena_offset),
|
331 |
+
msg);
|
332 |
+
SetOneofField(msg, presence, presence_index, offset, field_number,
|
333 |
+
value);
|
334 |
+
break;
|
335 |
+
}
|
336 |
+
} else {
|
337 |
+
UnknownFieldHandler::Varint(msg, table, tag, value);
|
338 |
+
}
|
339 |
+
|
340 |
+
return true;
|
341 |
+
}
|
342 |
+
|
343 |
+
// RepeatedMessageTypeHandler allows us to operate on RepeatedPtrField fields
|
344 |
+
// without instantiating the specific template.
|
345 |
+
class RepeatedMessageTypeHandler {
|
346 |
+
public:
|
347 |
+
typedef MessageLite Type;
|
348 |
+
typedef MessageLite WeakType;
|
349 |
+
static Arena* GetArena(Type* t) { return t->GetArena(); }
|
350 |
+
static void* GetMaybeArenaPointer(Type* t) {
|
351 |
+
return t->GetMaybeArenaPointer();
|
352 |
+
}
|
353 |
+
static inline Type* NewFromPrototype(const Type* prototype,
|
354 |
+
Arena* arena = NULL) {
|
355 |
+
return prototype->New(arena);
|
356 |
+
}
|
357 |
+
static void Delete(Type* t, Arena* arena = NULL) {
|
358 |
+
if (arena == NULL) {
|
359 |
+
delete t;
|
360 |
+
}
|
361 |
+
}
|
362 |
+
};
|
363 |
+
|
364 |
+
class MergePartialFromCodedStreamHelper {
|
365 |
+
public:
|
366 |
+
static MessageLite* Add(RepeatedPtrFieldBase* field,
|
367 |
+
const MessageLite* prototype) {
|
368 |
+
return field->Add<RepeatedMessageTypeHandler>(
|
369 |
+
const_cast<MessageLite*>(prototype));
|
370 |
+
}
|
371 |
+
};
|
372 |
+
|
373 |
+
template <typename UnknownFieldHandler, typename InternalMetadata>
|
374 |
+
bool MergePartialFromCodedStreamImpl(MessageLite* msg, const ParseTable& table,
|
375 |
+
io::CodedInputStream* input) {
|
376 |
+
// We require that has_bits are present, as to avoid having to check for them
|
377 |
+
// for every field.
|
378 |
+
//
|
379 |
+
// TODO(ckennelly): Make this a compile-time parameter with templates.
|
380 |
+
GOOGLE_DCHECK_GE(table.has_bits_offset, 0);
|
381 |
+
uint32* has_bits = Raw<uint32>(msg, table.has_bits_offset);
|
382 |
+
GOOGLE_DCHECK(has_bits != NULL);
|
383 |
+
|
384 |
+
while (true) {
|
385 |
+
uint32 tag = input->ReadTag();
|
386 |
+
|
387 |
+
const WireFormatLite::WireType wire_type =
|
388 |
+
WireFormatLite::GetTagWireType(tag);
|
389 |
+
const int field_number = WireFormatLite::GetTagFieldNumber(tag);
|
390 |
+
|
391 |
+
if (field_number > table.max_field_number) {
|
392 |
+
// check for possible extensions
|
393 |
+
if (UnknownFieldHandler::ParseExtension(msg, table, input, tag)) {
|
394 |
+
// successfully parsed
|
395 |
+
continue;
|
396 |
+
}
|
397 |
+
|
398 |
+
if (GOOGLE_PREDICT_FALSE(
|
399 |
+
!UnknownFieldHandler::Skip(msg, table, input, tag))) {
|
400 |
+
return false;
|
401 |
+
}
|
402 |
+
|
403 |
+
continue;
|
404 |
+
}
|
405 |
+
|
406 |
+
// We implicitly verify that data points to a valid field as we check the
|
407 |
+
// wire types. Entries in table.fields[i] that do not correspond to valid
|
408 |
+
// field numbers have their normal_wiretype and packed_wiretype fields set
|
409 |
+
// with the kInvalidMask value. As wire_type cannot take on that value, we
|
410 |
+
// will never match.
|
411 |
+
const ParseTableField* data = table.fields + field_number;
|
412 |
+
|
413 |
+
// TODO(ckennelly): Avoid sign extension
|
414 |
+
const int64 presence_index = data->presence_index;
|
415 |
+
const int64 offset = data->offset;
|
416 |
+
const unsigned char processing_type = data->processing_type;
|
417 |
+
|
418 |
+
if (data->normal_wiretype == static_cast<unsigned char>(wire_type)) {
|
419 |
+
// TODO(ckennelly): Use a computed goto on GCC/LLVM or otherwise eliminate
|
420 |
+
// the bounds check on processing_type.
|
421 |
+
|
422 |
+
switch (processing_type) {
|
423 |
+
#define HANDLE_TYPE(TYPE, CPPTYPE) \
|
424 |
+
case (WireFormatLite::TYPE_##TYPE): { \
|
425 |
+
CPPTYPE value; \
|
426 |
+
if (GOOGLE_PREDICT_FALSE( \
|
427 |
+
(!WireFormatLite::ReadPrimitive< \
|
428 |
+
CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, &value)))) { \
|
429 |
+
return false; \
|
430 |
+
} \
|
431 |
+
SetField(msg, has_bits, presence_index, offset, value); \
|
432 |
+
break; \
|
433 |
+
} \
|
434 |
+
case (WireFormatLite::TYPE_##TYPE) | kRepeatedMask: { \
|
435 |
+
google::protobuf::RepeatedField<CPPTYPE>* values = \
|
436 |
+
Raw<google::protobuf::RepeatedField<CPPTYPE> >(msg, offset); \
|
437 |
+
if (GOOGLE_PREDICT_FALSE((!WireFormatLite::ReadRepeatedPrimitive< \
|
438 |
+
CPPTYPE, WireFormatLite::TYPE_##TYPE>( \
|
439 |
+
data->tag_size, tag, input, values)))) { \
|
440 |
+
return false; \
|
441 |
+
} \
|
442 |
+
break; \
|
443 |
+
} \
|
444 |
+
case (WireFormatLite::TYPE_##TYPE) | kOneofMask: { \
|
445 |
+
uint32* oneof_case = Raw<uint32>(msg, table.oneof_case_offset); \
|
446 |
+
CPPTYPE value; \
|
447 |
+
if (GOOGLE_PREDICT_FALSE( \
|
448 |
+
(!WireFormatLite::ReadPrimitive< \
|
449 |
+
CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, &value)))) { \
|
450 |
+
return false; \
|
451 |
+
} \
|
452 |
+
ClearOneofField(table.fields[oneof_case[presence_index]], \
|
453 |
+
GetArena<InternalMetadata>(msg, table.arena_offset), msg); \
|
454 |
+
SetOneofField(msg, oneof_case, presence_index, offset, field_number, \
|
455 |
+
value); \
|
456 |
+
break; \
|
457 |
+
}
|
458 |
+
|
459 |
+
HANDLE_TYPE(INT32, int32)
|
460 |
+
HANDLE_TYPE(INT64, int64)
|
461 |
+
HANDLE_TYPE(SINT32, int32)
|
462 |
+
HANDLE_TYPE(SINT64, int64)
|
463 |
+
HANDLE_TYPE(UINT32, uint32)
|
464 |
+
HANDLE_TYPE(UINT64, uint64)
|
465 |
+
|
466 |
+
HANDLE_TYPE(FIXED32, uint32)
|
467 |
+
HANDLE_TYPE(FIXED64, uint64)
|
468 |
+
HANDLE_TYPE(SFIXED32, int32)
|
469 |
+
HANDLE_TYPE(SFIXED64, int64)
|
470 |
+
|
471 |
+
HANDLE_TYPE(FLOAT, float)
|
472 |
+
HANDLE_TYPE(DOUBLE, double)
|
473 |
+
|
474 |
+
HANDLE_TYPE(BOOL, bool)
|
475 |
+
#undef HANDLE_TYPE
|
476 |
+
case WireFormatLite::TYPE_BYTES:
|
477 |
+
#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
478 |
+
case WireFormatLite::TYPE_STRING:
|
479 |
+
#endif
|
480 |
+
{
|
481 |
+
Arena* const arena =
|
482 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
483 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
484 |
+
|
485 |
+
if (GOOGLE_PREDICT_FALSE((
|
486 |
+
!HandleString<Cardinality_SINGULAR, false, StringType_STRING>(
|
487 |
+
input, msg, arena, has_bits, presence_index, offset,
|
488 |
+
default_ptr, NULL)))) {
|
489 |
+
return false;
|
490 |
+
}
|
491 |
+
break;
|
492 |
+
}
|
493 |
+
case TYPE_BYTES_INLINED:
|
494 |
+
#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
495 |
+
case TYPE_STRING_INLINED:
|
496 |
+
#endif
|
497 |
+
{
|
498 |
+
Arena* const arena =
|
499 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
500 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
501 |
+
|
502 |
+
if (GOOGLE_PREDICT_FALSE((!HandleString<Cardinality_SINGULAR, false,
|
503 |
+
StringType_INLINED>(
|
504 |
+
input, msg, arena, has_bits, presence_index, offset,
|
505 |
+
default_ptr, NULL)))) {
|
506 |
+
return false;
|
507 |
+
}
|
508 |
+
break;
|
509 |
+
}
|
510 |
+
case WireFormatLite::TYPE_BYTES | kOneofMask:
|
511 |
+
#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
512 |
+
case WireFormatLite::TYPE_STRING | kOneofMask:
|
513 |
+
#endif
|
514 |
+
{
|
515 |
+
Arena* const arena =
|
516 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
517 |
+
uint32* oneof_case = Raw<uint32>(msg, table.oneof_case_offset);
|
518 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
519 |
+
|
520 |
+
ResetOneofField<ProcessingType_STRING>(
|
521 |
+
table, field_number, arena, msg, oneof_case + presence_index,
|
522 |
+
offset, default_ptr);
|
523 |
+
|
524 |
+
if (GOOGLE_PREDICT_FALSE(
|
525 |
+
(!HandleString<Cardinality_ONEOF, false, StringType_STRING>(
|
526 |
+
input, msg, arena, has_bits, presence_index, offset,
|
527 |
+
default_ptr, NULL)))) {
|
528 |
+
return false;
|
529 |
+
}
|
530 |
+
break;
|
531 |
+
}
|
532 |
+
case (WireFormatLite::TYPE_BYTES) | kRepeatedMask:
|
533 |
+
case TYPE_BYTES_INLINED | kRepeatedMask:
|
534 |
+
#ifndef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
535 |
+
case (WireFormatLite::TYPE_STRING) | kRepeatedMask:
|
536 |
+
case TYPE_STRING_INLINED | kRepeatedMask:
|
537 |
+
#endif
|
538 |
+
{
|
539 |
+
Arena* const arena =
|
540 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
541 |
+
const void* default_ptr =
|
542 |
+
table.aux[field_number].strings.default_ptr;
|
543 |
+
|
544 |
+
if (GOOGLE_PREDICT_FALSE((
|
545 |
+
!HandleString<Cardinality_REPEATED, false, StringType_STRING>(
|
546 |
+
input, msg, arena, has_bits, presence_index, offset,
|
547 |
+
default_ptr, NULL)))) {
|
548 |
+
return false;
|
549 |
+
}
|
550 |
+
break;
|
551 |
+
}
|
552 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
553 |
+
case (WireFormatLite::TYPE_STRING): {
|
554 |
+
Arena* const arena =
|
555 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
556 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
557 |
+
const char* field_name = table.aux[field_number].strings.field_name;
|
558 |
+
|
559 |
+
if (GOOGLE_PREDICT_FALSE(
|
560 |
+
(!HandleString<Cardinality_SINGULAR, true, StringType_STRING>(
|
561 |
+
input, msg, arena, has_bits, presence_index, offset,
|
562 |
+
default_ptr, field_name)))) {
|
563 |
+
return false;
|
564 |
+
}
|
565 |
+
break;
|
566 |
+
}
|
567 |
+
case TYPE_STRING_INLINED | kRepeatedMask:
|
568 |
+
case (WireFormatLite::TYPE_STRING) | kRepeatedMask: {
|
569 |
+
Arena* const arena =
|
570 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
571 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
572 |
+
const char* field_name = table.aux[field_number].strings.field_name;
|
573 |
+
|
574 |
+
if (GOOGLE_PREDICT_FALSE(
|
575 |
+
(!HandleString<Cardinality_REPEATED, true, StringType_STRING>(
|
576 |
+
input, msg, arena, has_bits, presence_index, offset,
|
577 |
+
default_ptr, field_name)))) {
|
578 |
+
return false;
|
579 |
+
}
|
580 |
+
break;
|
581 |
+
}
|
582 |
+
case (WireFormatLite::TYPE_STRING) | kOneofMask: {
|
583 |
+
Arena* const arena =
|
584 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
585 |
+
uint32* oneof_case = Raw<uint32>(msg, table.oneof_case_offset);
|
586 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
587 |
+
const char* field_name = table.aux[field_number].strings.field_name;
|
588 |
+
|
589 |
+
ResetOneofField<ProcessingType_STRING>(
|
590 |
+
table, field_number, arena, msg, oneof_case + presence_index,
|
591 |
+
offset, default_ptr);
|
592 |
+
|
593 |
+
if (GOOGLE_PREDICT_FALSE(
|
594 |
+
(!HandleString<Cardinality_ONEOF, true, StringType_STRING>(
|
595 |
+
input, msg, arena, has_bits, presence_index, offset,
|
596 |
+
default_ptr, field_name)))) {
|
597 |
+
return false;
|
598 |
+
}
|
599 |
+
break;
|
600 |
+
}
|
601 |
+
#endif
|
602 |
+
case WireFormatLite::TYPE_ENUM: {
|
603 |
+
if (GOOGLE_PREDICT_FALSE(
|
604 |
+
(!HandleEnum<UnknownFieldHandler, InternalMetadata,
|
605 |
+
Cardinality_SINGULAR>(
|
606 |
+
table, input, msg, has_bits, presence_index, offset, tag,
|
607 |
+
field_number)))) {
|
608 |
+
return false;
|
609 |
+
}
|
610 |
+
break;
|
611 |
+
}
|
612 |
+
case WireFormatLite::TYPE_ENUM | kRepeatedMask: {
|
613 |
+
if (GOOGLE_PREDICT_FALSE(
|
614 |
+
(!HandleEnum<UnknownFieldHandler, InternalMetadata,
|
615 |
+
Cardinality_REPEATED>(
|
616 |
+
table, input, msg, has_bits, presence_index, offset, tag,
|
617 |
+
field_number)))) {
|
618 |
+
return false;
|
619 |
+
}
|
620 |
+
break;
|
621 |
+
}
|
622 |
+
case WireFormatLite::TYPE_ENUM | kOneofMask: {
|
623 |
+
uint32* oneof_case = Raw<uint32>(msg, table.oneof_case_offset);
|
624 |
+
if (GOOGLE_PREDICT_FALSE(
|
625 |
+
(!HandleEnum<UnknownFieldHandler, InternalMetadata,
|
626 |
+
Cardinality_ONEOF>(table, input, msg, oneof_case,
|
627 |
+
presence_index, offset, tag,
|
628 |
+
field_number)))) {
|
629 |
+
return false;
|
630 |
+
}
|
631 |
+
break;
|
632 |
+
}
|
633 |
+
case WireFormatLite::TYPE_GROUP: {
|
634 |
+
MessageLite** submsg_holder =
|
635 |
+
MutableField<MessageLite*>(msg, has_bits, presence_index, offset);
|
636 |
+
MessageLite* submsg = *submsg_holder;
|
637 |
+
|
638 |
+
if (submsg == NULL) {
|
639 |
+
Arena* const arena =
|
640 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
641 |
+
const MessageLite* prototype =
|
642 |
+
table.aux[field_number].messages.default_message();
|
643 |
+
submsg = prototype->New(arena);
|
644 |
+
*submsg_holder = submsg;
|
645 |
+
}
|
646 |
+
|
647 |
+
if (GOOGLE_PREDICT_FALSE(
|
648 |
+
!WireFormatLite::ReadGroup(field_number, input, submsg))) {
|
649 |
+
return false;
|
650 |
+
}
|
651 |
+
|
652 |
+
break;
|
653 |
+
}
|
654 |
+
case WireFormatLite::TYPE_GROUP | kRepeatedMask: {
|
655 |
+
RepeatedPtrFieldBase* field = Raw<RepeatedPtrFieldBase>(msg, offset);
|
656 |
+
const MessageLite* prototype =
|
657 |
+
table.aux[field_number].messages.default_message();
|
658 |
+
GOOGLE_DCHECK(prototype != NULL);
|
659 |
+
|
660 |
+
MessageLite* submsg =
|
661 |
+
MergePartialFromCodedStreamHelper::Add(field, prototype);
|
662 |
+
|
663 |
+
if (GOOGLE_PREDICT_FALSE(
|
664 |
+
!WireFormatLite::ReadGroup(field_number, input, submsg))) {
|
665 |
+
return false;
|
666 |
+
}
|
667 |
+
|
668 |
+
break;
|
669 |
+
}
|
670 |
+
case WireFormatLite::TYPE_MESSAGE: {
|
671 |
+
MessageLite** submsg_holder =
|
672 |
+
MutableField<MessageLite*>(msg, has_bits, presence_index, offset);
|
673 |
+
MessageLite* submsg = *submsg_holder;
|
674 |
+
|
675 |
+
if (submsg == NULL) {
|
676 |
+
Arena* const arena =
|
677 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
678 |
+
const MessageLite* prototype =
|
679 |
+
table.aux[field_number].messages.default_message();
|
680 |
+
if (prototype == NULL) {
|
681 |
+
prototype =
|
682 |
+
::google::protobuf::internal::ImplicitWeakMessage::default_instance();
|
683 |
+
}
|
684 |
+
submsg = prototype->New(arena);
|
685 |
+
*submsg_holder = submsg;
|
686 |
+
}
|
687 |
+
|
688 |
+
if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) {
|
689 |
+
return false;
|
690 |
+
}
|
691 |
+
|
692 |
+
break;
|
693 |
+
}
|
694 |
+
// TODO(ckennelly): Adapt ReadMessageNoVirtualNoRecursionDepth and
|
695 |
+
// manage input->IncrementRecursionDepth() here.
|
696 |
+
case WireFormatLite::TYPE_MESSAGE | kRepeatedMask: {
|
697 |
+
RepeatedPtrFieldBase* field = Raw<RepeatedPtrFieldBase>(msg, offset);
|
698 |
+
const MessageLite* prototype =
|
699 |
+
table.aux[field_number].messages.default_message();
|
700 |
+
if (prototype == NULL) {
|
701 |
+
prototype =
|
702 |
+
::google::protobuf::internal::ImplicitWeakMessage::default_instance();
|
703 |
+
}
|
704 |
+
|
705 |
+
MessageLite* submsg =
|
706 |
+
MergePartialFromCodedStreamHelper::Add(field, prototype);
|
707 |
+
|
708 |
+
if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) {
|
709 |
+
return false;
|
710 |
+
}
|
711 |
+
|
712 |
+
break;
|
713 |
+
}
|
714 |
+
case WireFormatLite::TYPE_MESSAGE | kOneofMask: {
|
715 |
+
Arena* const arena =
|
716 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
717 |
+
uint32* oneof_case = Raw<uint32>(msg, table.oneof_case_offset);
|
718 |
+
MessageLite** submsg_holder = Raw<MessageLite*>(msg, offset);
|
719 |
+
ResetOneofField<ProcessingType_MESSAGE>(
|
720 |
+
table, field_number, arena, msg, oneof_case + presence_index,
|
721 |
+
offset, NULL);
|
722 |
+
MessageLite* submsg = *submsg_holder;
|
723 |
+
|
724 |
+
if (GOOGLE_PREDICT_FALSE(!WireFormatLite::ReadMessage(input, submsg))) {
|
725 |
+
return false;
|
726 |
+
}
|
727 |
+
|
728 |
+
break;
|
729 |
+
}
|
730 |
+
#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
731 |
+
case TYPE_STRING_INLINED: {
|
732 |
+
Arena* const arena =
|
733 |
+
GetArena<InternalMetadata>(msg, table.arena_offset);
|
734 |
+
const void* default_ptr = table.aux[field_number].strings.default_ptr;
|
735 |
+
const char* field_name = table.aux[field_number].strings.field_name;
|
736 |
+
|
737 |
+
if (GOOGLE_PREDICT_FALSE((
|
738 |
+
!HandleString<Cardinality_SINGULAR, true, StringType_INLINED>(
|
739 |
+
input, msg, arena, has_bits, presence_index, offset,
|
740 |
+
default_ptr, field_name)))) {
|
741 |
+
return false;
|
742 |
+
}
|
743 |
+
break;
|
744 |
+
}
|
745 |
+
#endif // GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED
|
746 |
+
case TYPE_MAP: {
|
747 |
+
if (GOOGLE_PREDICT_FALSE(!(*table.aux[field_number].maps.parse_map)(
|
748 |
+
input, Raw<void>(msg, offset)))) {
|
749 |
+
return false;
|
750 |
+
}
|
751 |
+
break;
|
752 |
+
}
|
753 |
+
case 0: {
|
754 |
+
// Done.
|
755 |
+
return true;
|
756 |
+
}
|
757 |
+
default:
|
758 |
+
break;
|
759 |
+
}
|
760 |
+
} else if (data->packed_wiretype == static_cast<unsigned char>(wire_type)) {
|
761 |
+
// Non-packable fields have their packed_wiretype masked with
|
762 |
+
// kNotPackedMask, which is impossible to match here.
|
763 |
+
GOOGLE_DCHECK(processing_type & kRepeatedMask);
|
764 |
+
GOOGLE_DCHECK_NE(processing_type, kRepeatedMask);
|
765 |
+
GOOGLE_DCHECK_EQ(0, processing_type & kOneofMask);
|
766 |
+
|
767 |
+
GOOGLE_DCHECK_NE(TYPE_BYTES_INLINED | kRepeatedMask, processing_type);
|
768 |
+
GOOGLE_DCHECK_NE(TYPE_STRING_INLINED | kRepeatedMask, processing_type);
|
769 |
+
|
770 |
+
// TODO(ckennelly): Use a computed goto on GCC/LLVM.
|
771 |
+
//
|
772 |
+
// Mask out kRepeatedMask bit, allowing the jump table to be smaller.
|
773 |
+
switch (static_cast<WireFormatLite::FieldType>(
|
774 |
+
processing_type ^ kRepeatedMask)) {
|
775 |
+
#define HANDLE_PACKED_TYPE(TYPE, CPPTYPE, CPPTYPE_METHOD) \
|
776 |
+
case WireFormatLite::TYPE_##TYPE: { \
|
777 |
+
google::protobuf::RepeatedField<CPPTYPE>* values = \
|
778 |
+
Raw<google::protobuf::RepeatedField<CPPTYPE> >(msg, offset); \
|
779 |
+
if (GOOGLE_PREDICT_FALSE( \
|
780 |
+
(!WireFormatLite::ReadPackedPrimitive< \
|
781 |
+
CPPTYPE, WireFormatLite::TYPE_##TYPE>(input, values)))) { \
|
782 |
+
return false; \
|
783 |
+
} \
|
784 |
+
break; \
|
785 |
+
}
|
786 |
+
|
787 |
+
HANDLE_PACKED_TYPE(INT32, int32, Int32)
|
788 |
+
HANDLE_PACKED_TYPE(INT64, int64, Int64)
|
789 |
+
HANDLE_PACKED_TYPE(SINT32, int32, Int32)
|
790 |
+
HANDLE_PACKED_TYPE(SINT64, int64, Int64)
|
791 |
+
HANDLE_PACKED_TYPE(UINT32, uint32, UInt32)
|
792 |
+
HANDLE_PACKED_TYPE(UINT64, uint64, UInt64)
|
793 |
+
|
794 |
+
HANDLE_PACKED_TYPE(FIXED32, uint32, UInt32)
|
795 |
+
HANDLE_PACKED_TYPE(FIXED64, uint64, UInt64)
|
796 |
+
HANDLE_PACKED_TYPE(SFIXED32, int32, Int32)
|
797 |
+
HANDLE_PACKED_TYPE(SFIXED64, int64, Int64)
|
798 |
+
|
799 |
+
HANDLE_PACKED_TYPE(FLOAT, float, Float)
|
800 |
+
HANDLE_PACKED_TYPE(DOUBLE, double, Double)
|
801 |
+
|
802 |
+
HANDLE_PACKED_TYPE(BOOL, bool, Bool)
|
803 |
+
#undef HANDLE_PACKED_TYPE
|
804 |
+
case WireFormatLite::TYPE_ENUM: {
|
805 |
+
// To avoid unnecessarily calling MutableUnknownFields (which mutates
|
806 |
+
// InternalMetadataWithArena) when all inputs in the repeated series
|
807 |
+
// are valid, we implement our own parser rather than call
|
808 |
+
// WireFormat::ReadPackedEnumPreserveUnknowns.
|
809 |
+
uint32 length;
|
810 |
+
if (GOOGLE_PREDICT_FALSE(!input->ReadVarint32(&length))) {
|
811 |
+
return false;
|
812 |
+
}
|
813 |
+
|
814 |
+
AuxillaryParseTableField::EnumValidator validator =
|
815 |
+
table.aux[field_number].enums.validator;
|
816 |
+
google::protobuf::RepeatedField<int>* values =
|
817 |
+
Raw<google::protobuf::RepeatedField<int> >(msg, offset);
|
818 |
+
|
819 |
+
io::CodedInputStream::Limit limit = input->PushLimit(length);
|
820 |
+
while (input->BytesUntilLimit() > 0) {
|
821 |
+
int value;
|
822 |
+
if (GOOGLE_PREDICT_FALSE(
|
823 |
+
(!google::protobuf::internal::WireFormatLite::ReadPrimitive<
|
824 |
+
int, WireFormatLite::TYPE_ENUM>(input, &value)))) {
|
825 |
+
return false;
|
826 |
+
}
|
827 |
+
|
828 |
+
if (validator(value)) {
|
829 |
+
values->Add(value);
|
830 |
+
} else {
|
831 |
+
// TODO(ckennelly): Consider caching here.
|
832 |
+
UnknownFieldHandler::Varint(msg, table, tag, value);
|
833 |
+
}
|
834 |
+
}
|
835 |
+
input->PopLimit(limit);
|
836 |
+
|
837 |
+
break;
|
838 |
+
}
|
839 |
+
case WireFormatLite::TYPE_STRING:
|
840 |
+
case WireFormatLite::TYPE_GROUP:
|
841 |
+
case WireFormatLite::TYPE_MESSAGE:
|
842 |
+
case WireFormatLite::TYPE_BYTES:
|
843 |
+
GOOGLE_DCHECK(false);
|
844 |
+
return false;
|
845 |
+
default:
|
846 |
+
break;
|
847 |
+
}
|
848 |
+
} else {
|
849 |
+
if (wire_type == WireFormatLite::WIRETYPE_END_GROUP) {
|
850 |
+
// Must be the end of the message.
|
851 |
+
return true;
|
852 |
+
}
|
853 |
+
|
854 |
+
// check for possible extensions
|
855 |
+
if (UnknownFieldHandler::ParseExtension(msg, table, input, tag)) {
|
856 |
+
// successfully parsed
|
857 |
+
continue;
|
858 |
+
}
|
859 |
+
|
860 |
+
// process unknown field.
|
861 |
+
if (GOOGLE_PREDICT_FALSE(
|
862 |
+
!UnknownFieldHandler::Skip(msg, table, input, tag))) {
|
863 |
+
return false;
|
864 |
+
}
|
865 |
+
}
|
866 |
+
}
|
867 |
+
}
|
868 |
+
|
869 |
+
} // namespace internal
|
870 |
+
} // namespace protobuf
|
871 |
+
|
872 |
+
} // namespace google
|
873 |
+
#endif // GOOGLE_PROTOBUF_GENERATED_MESSAGE_TABLE_DRIVEN_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/generated_message_util.h
ADDED
@@ -0,0 +1,391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This file contains miscellaneous helper code used by generated code --
|
36 |
+
// including lite types -- but which should not be used directly by users.
|
37 |
+
|
38 |
+
#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
|
39 |
+
#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
|
40 |
+
|
41 |
+
#include <assert.h>

#include <algorithm>
#include <atomic>
#include <climits>
#include <string>
#include <vector>

#include <google/protobuf/stubs/logging.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/once.h>  // Add direct dep on port for pb.cc
#include <google/protobuf/has_bits.h>
#include <google/protobuf/implicit_weak_message.h>
#include <google/protobuf/map_entry_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/wire_format_lite.h>
|
55 |
+
|
56 |
+
namespace google {
|
57 |
+
|
58 |
+
namespace protobuf {
|
59 |
+
|
60 |
+
class Arena;
|
61 |
+
|
62 |
+
namespace io { class CodedInputStream; }
|
63 |
+
|
64 |
+
namespace internal {
|
65 |
+
|
66 |
+
|
67 |
+
// Annotation for the compiler to emit a deprecation message if a field marked
|
68 |
+
// with option 'deprecated=true' is used in the code, or for other things in
|
69 |
+
// generated code which are deprecated.
|
70 |
+
//
|
71 |
+
// For internal use in the pb.cc files, deprecation warnings are suppressed
|
72 |
+
// there.
|
73 |
+
#undef DEPRECATED_PROTOBUF_FIELD
|
74 |
+
#define PROTOBUF_DEPRECATED
|
75 |
+
|
76 |
+
#define GOOGLE_PROTOBUF_DEPRECATED_ATTR
|
77 |
+
|
78 |
+
|
79 |
+
// Returns the offset of the given field within the given aggregate type.
|
80 |
+
// This is equivalent to the ANSI C offsetof() macro. However, according
|
81 |
+
// to the C++ standard, offsetof() only works on POD types, and GCC
|
82 |
+
// enforces this requirement with a warning. In practice, this rule is
|
83 |
+
// unnecessarily strict; there is probably no compiler or platform on
|
84 |
+
// which the offsets of the direct fields of a class are non-constant.
|
85 |
+
// Fields inherited from superclasses *can* have non-constant offsets,
|
86 |
+
// but that's not what this macro will be used for.
|
87 |
+
#if defined(__clang__)
|
88 |
+
// For Clang we use __builtin_offsetof() and suppress the warning,
|
89 |
+
// to avoid Control Flow Integrity and UBSan vptr sanitizers from
|
90 |
+
// crashing while trying to validate the invalid reinterpet_casts.
|
91 |
+
#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD) \
|
92 |
+
_Pragma("clang diagnostic push") \
|
93 |
+
_Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"") \
|
94 |
+
__builtin_offsetof(TYPE, FIELD) \
|
95 |
+
_Pragma("clang diagnostic pop")
|
96 |
+
#else
|
97 |
+
// Note that we calculate relative to the pointer value 16 here since if we
|
98 |
+
// just use zero, GCC complains about dereferencing a NULL pointer. We
|
99 |
+
// choose 16 rather than some other number just in case the compiler would
|
100 |
+
// be confused by an unaligned pointer.
|
101 |
+
#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD) \
|
102 |
+
static_cast< ::google::protobuf::uint32>( \
|
103 |
+
reinterpret_cast<const char*>( \
|
104 |
+
&reinterpret_cast<const TYPE*>(16)->FIELD) - \
|
105 |
+
reinterpret_cast<const char*>(16))
|
106 |
+
#endif
|
107 |
+
|
108 |
+
// Constants for special floating point values.
|
109 |
+
LIBPROTOBUF_EXPORT double Infinity();
|
110 |
+
LIBPROTOBUF_EXPORT double NaN();
|
111 |
+
|
112 |
+
LIBPROTOBUF_EXPORT void InitProtobufDefaults();
|
113 |
+
|
114 |
+
// This used by proto1
|
115 |
+
inline const std::string& GetEmptyString() {
|
116 |
+
InitProtobufDefaults();
|
117 |
+
return GetEmptyStringAlreadyInited();
|
118 |
+
}
|
119 |
+
|
120 |
+
// True if IsInitialized() is true for all elements of t.  Type is expected
// to be a RepeatedPtrField<some message type>.  Having this helper here
// keeps the protobuf compiler from ever having to emit loops in generated
// IsInitialized() methods; the C++ compiler can inline it or not as it sees
// fit.
template <class Type> bool AllAreInitialized(const Type& t) {
  // Scan from the last element downward and stop at the first element that
  // reports itself uninitialized.
  int index = t.size();
  while (--index >= 0) {
    if (!t.Get(index).IsInitialized()) {
      return false;
    }
  }
  return true;
}
|
131 |
+
|
132 |
+
// "Weak" variant of AllAreInitialized, used to implement implicit weak fields.
|
133 |
+
// This version operates on MessageLite to avoid introducing a dependency on the
|
134 |
+
// concrete message type.
|
135 |
+
template <class T>
|
136 |
+
bool AllAreInitializedWeak(const ::google::protobuf::RepeatedPtrField<T>& t) {
|
137 |
+
for (int i = t.size(); --i >= 0;) {
|
138 |
+
if (!reinterpret_cast<const ::google::protobuf::internal::RepeatedPtrFieldBase&>(t)
|
139 |
+
.Get<::google::protobuf::internal::ImplicitWeakTypeHandler<T> >(i)
|
140 |
+
.IsInitialized()) {
|
141 |
+
return false;
|
142 |
+
}
|
143 |
+
}
|
144 |
+
return true;
|
145 |
+
}
|
146 |
+
|
147 |
+
// Per-field serialization metadata consumed by SerializeInternal /
// SerializeInternalToArray (table-driven serialization).
struct LIBPROTOBUF_EXPORT FieldMetadata {
  uint32 offset;  // offset of this field in the struct
  uint32 tag;     // field * 8 + wire_type
  // byte offset * 8 + bit_offset;
  // if the high bit is set then this is the byte offset of the oneof_case
  // for this field.
  uint32 has_offset;
  uint32 type;      // the type of this field (see enums below).
  const void* ptr;  // auxiliary data

  // From the serializer point of view each fundamental type can occur in
  // several different ways.  For simplicity we treat all combinations as a
  // cartesian product although not all combinations are allowed.
  enum FieldTypeClass {
    kPresence,
    kNoPresence,
    kRepeated,
    kPacked,
    kOneOf,
    kNumTypeClasses  // must be last enum
  };
  // C++ protobuf has 20 fundamental types, where we added Cord and
  // StringPiece and also distinguish the same types if they have different
  // wire format.
  enum {
    kCordType = 19,
    kStringPieceType = 20,
    kInlinedType = 21,
    kNumTypes = 21,
    kSpecial = kNumTypes * kNumTypeClasses,
  };

  // Encodes (fundamental_type, type_class) into the value space used by the
  // `type` member above.
  static int CalculateType(int fundamental_type, FieldTypeClass type_class);
};
|
180 |
+
|
181 |
+
inline bool IsPresent(const void* base, uint32 hasbit) {
|
182 |
+
const uint32* has_bits_array = static_cast<const uint32*>(base);
|
183 |
+
return (has_bits_array[hasbit / 32] & (1u << (hasbit & 31))) != 0;
|
184 |
+
}
|
185 |
+
|
186 |
+
inline bool IsOneofPresent(const void* base, uint32 offset, uint32 tag) {
|
187 |
+
const uint32* oneof =
|
188 |
+
reinterpret_cast<const uint32*>(static_cast<const uint8*>(base) + offset);
|
189 |
+
return *oneof == tag >> 3;
|
190 |
+
}
|
191 |
+
|
192 |
+
typedef void (*SpecialSerializer)(const uint8* base, uint32 offset, uint32 tag,
|
193 |
+
uint32 has_offset,
|
194 |
+
::google::protobuf::io::CodedOutputStream* output);
|
195 |
+
|
196 |
+
LIBPROTOBUF_EXPORT void ExtensionSerializer(const uint8* base, uint32 offset, uint32 tag,
|
197 |
+
uint32 has_offset,
|
198 |
+
::google::protobuf::io::CodedOutputStream* output);
|
199 |
+
LIBPROTOBUF_EXPORT void UnknownFieldSerializerLite(const uint8* base, uint32 offset, uint32 tag,
|
200 |
+
uint32 has_offset,
|
201 |
+
::google::protobuf::io::CodedOutputStream* output);
|
202 |
+
|
203 |
+
// Pairs a message type's FieldMetadata array with its length.  Entry 0 of
// field_table is special-cased by TableSerialize/TableSerializeToArray
// (they start serializing at field_table + 1).
struct SerializationTable {
  int num_fields;                    // number of entries in field_table
  const FieldMetadata* field_table;  // per-field serialization metadata
};
|
207 |
+
|
208 |
+
LIBPROTOBUF_EXPORT void SerializeInternal(const uint8* base, const FieldMetadata* table,
|
209 |
+
int num_fields, ::google::protobuf::io::CodedOutputStream* output);
|
210 |
+
|
211 |
+
// Serializes `msg` to `output` using its serialization table.  The first
// table entry is skipped (see the TODO below: its offset records the
// cached-size slot).
inline void TableSerialize(const ::google::protobuf::MessageLite& msg,
                           const SerializationTable* table,
                           ::google::protobuf::io::CodedOutputStream* output) {
  const uint8* base = reinterpret_cast<const uint8*>(&msg);
  const FieldMetadata* fields = table->field_table;
  const int field_count = table->num_fields - 1;
  // TODO(gerbens) This skips the first test if we could use the fast
  // array serialization path, we should make this
  //   int cached_size =
  //       *reinterpret_cast<const int32*>(base + field_table->offset);
  //   SerializeWithCachedSize(msg, field_table + 1, num_fields, cached_size, ...)
  // But we keep conformance with the old way for now.
  SerializeInternal(base, fields + 1, field_count, output);
}
|
225 |
+
|
226 |
+
uint8* SerializeInternalToArray(const uint8* base, const FieldMetadata* table,
|
227 |
+
int num_fields, bool is_deterministic,
|
228 |
+
uint8* buffer);
|
229 |
+
|
230 |
+
// Array variant of TableSerialize: serializes `msg` directly into `buffer`
// and returns a pointer one past the last byte written.  The sentinel entry
// at index 0 of the field table is skipped, matching TableSerialize.
inline uint8* TableSerializeToArray(const ::google::protobuf::MessageLite& msg,
                                    const SerializationTable* table,
                                    bool is_deterministic, uint8* buffer) {
  const uint8* base = reinterpret_cast<const uint8*>(&msg);
  const int field_count = table->num_fields - 1;
  const FieldMetadata* fields = table->field_table + 1;
  return SerializeInternalToArray(base, fields, field_count, is_deterministic,
                                  buffer);
}
|
239 |
+
|
240 |
+
// Default key ordering used when sorting map entries for deterministic
// serialization: plain operator<.
template <typename T>
struct CompareHelper {
  bool operator()(const T& lhs, const T& rhs) { return lhs < rhs; }
};
|
244 |
+
|
245 |
+
template <>
|
246 |
+
struct CompareHelper<ArenaStringPtr> {
|
247 |
+
bool operator()(const ArenaStringPtr& a, const ArenaStringPtr& b) {
|
248 |
+
return a.Get() < b.Get();
|
249 |
+
}
|
250 |
+
};
|
251 |
+
|
252 |
+
struct CompareMapKey {
|
253 |
+
template <typename T>
|
254 |
+
bool operator()(const MapEntryHelper<T>& a, const MapEntryHelper<T>& b) {
|
255 |
+
return Compare(a.key_, b.key_);
|
256 |
+
}
|
257 |
+
template <typename T>
|
258 |
+
bool Compare(const T& a, const T& b) {
|
259 |
+
return CompareHelper<T>()(a, b);
|
260 |
+
}
|
261 |
+
};
|
262 |
+
|
263 |
+
// Serializer for map fields.  `has_offset` is overloaded for maps: it is the
// index of the map-entry message's SerializationTable within `table`.  When
// the output stream requests deterministic serialization, entries are sorted
// by key before being written; otherwise they are written in map iteration
// order.
template <typename MapFieldType, const SerializationTable* table>
void MapFieldSerializer(const uint8* base, uint32 offset, uint32 tag,
                        uint32 has_offset,
                        ::google::protobuf::io::CodedOutputStream* output) {
  typedef MapEntryHelper<typename MapFieldType::EntryTypeTrait> Entry;
  typedef typename MapFieldType::MapType::const_iterator Iter;

  const MapFieldType& map_field =
      *reinterpret_cast<const MapFieldType*>(base + offset);
  const SerializationTable* t =
      table +
      has_offset;  // has_offset is overloaded for maps to mean table offset
  if (!output->IsSerializationDeterministic()) {
    // Fast path: stream each entry as encountered.
    for (Iter it = map_field.GetMap().begin(); it != map_field.GetMap().end();
         ++it) {
      Entry map_entry(*it);
      output->WriteVarint32(tag);
      output->WriteVarint32(map_entry._cached_size_);
      SerializeInternal(reinterpret_cast<const uint8*>(&map_entry),
                        t->field_table, t->num_fields, output);
    }
  } else {
    // Deterministic path: materialize all entries, sort by key, then write.
    std::vector<Entry> v;
    v.reserve(map_field.GetMap().size());
    for (Iter it = map_field.GetMap().begin(); it != map_field.GetMap().end();
         ++it) {
      v.push_back(Entry(*it));
    }
    std::sort(v.begin(), v.end(), CompareMapKey());
    // size_t index avoids the signed/unsigned comparison against v.size().
    for (size_t i = 0; i < v.size(); i++) {
      output->WriteVarint32(tag);
      output->WriteVarint32(v[i]._cached_size_);
      SerializeInternal(reinterpret_cast<const uint8*>(&v[i]), t->field_table,
                        t->num_fields, output);
    }
  }
}
|
299 |
+
|
300 |
+
LIBPROTOBUF_EXPORT MessageLite* DuplicateIfNonNullInternal(MessageLite* message);
|
301 |
+
LIBPROTOBUF_EXPORT MessageLite* GetOwnedMessageInternal(Arena* message_arena,
|
302 |
+
MessageLite* submessage,
|
303 |
+
Arena* submessage_arena);
|
304 |
+
|
305 |
+
template <typename T>
|
306 |
+
T* DuplicateIfNonNull(T* message) {
|
307 |
+
// The casts must be reinterpret_cast<> because T might be a forward-declared
|
308 |
+
// type that the compiler doesn't know is related to MessageLite.
|
309 |
+
return reinterpret_cast<T*>(
|
310 |
+
DuplicateIfNonNullInternal(reinterpret_cast<MessageLite*>(message)));
|
311 |
+
}
|
312 |
+
|
313 |
+
template <typename T>
|
314 |
+
T* GetOwnedMessage(Arena* message_arena, T* submessage,
|
315 |
+
Arena* submessage_arena) {
|
316 |
+
// The casts must be reinterpret_cast<> because T might be a forward-declared
|
317 |
+
// type that the compiler doesn't know is related to MessageLite.
|
318 |
+
return reinterpret_cast<T*>(GetOwnedMessageInternal(
|
319 |
+
message_arena, reinterpret_cast<MessageLite*>(submessage),
|
320 |
+
submessage_arena));
|
321 |
+
}
|
322 |
+
|
323 |
+
// Holds a message's cached byte size.  Hide atomic from the public header
// and allow easy change to regular int on platforms where the atomic might
// have a perf impact.
class LIBPROTOBUF_EXPORT CachedSize {
 public:
  // Relaxed ordering: the value is a standalone cache and orders nothing else.
  int Get() const { return size_.load(std::memory_order_relaxed); }
  void Set(int size) { size_.store(size, std::memory_order_relaxed); }
 private:
  std::atomic<int> size_{0};
};
|
332 |
+
|
333 |
+
// SCCInfo represents information of a strongly connected component of
// mutually dependent messages.
struct LIBPROTOBUF_EXPORT SCCInfoBase {
  // We use 0 for the Initialized state, because test eax,eax, jnz is smaller
  // and is subject to macro fusion.
  enum {
    kInitialized = 0,  // final state
    kRunning = 1,
    kUninitialized = -1,  // initial state
  };
#ifndef _MSC_VER
  std::atomic<int> visit_status;
#else
  // MSVC doesn't make std::atomic constant initialized. This union trick
  // makes it so.
  union {
    int visit_status_to_make_linker_init;
    std::atomic<int> visit_status;
  };
#endif
  int num_deps;         // number of entries in the trailing deps array
  void (*init_func)();  // initialization routine for this SCC (run by InitSCCImpl)
  // This is followed by an array of num_deps
  // const SCCInfoBase* deps[];
};
|
358 |
+
|
359 |
+
// Statically-initializable wrapper that places an N-entry dependency array
// directly after an SCCInfoBase (see the trailing-array comment there).
template <int N>
struct SCCInfo {
  SCCInfoBase base;
  // Semantically this is const SCCInfo<T>* which is a templated type.
  // The obvious inheriting from SCCInfoBase mucks with struct initialization.
  // Attempts showed the compiler was generating dynamic initialization code.
  // Zero length arrays produce warnings with MSVC.
  SCCInfoBase* deps[N ? N : 1];
};
|
368 |
+
|
369 |
+
LIBPROTOBUF_EXPORT void InitSCCImpl(SCCInfoBase* scc);
|
370 |
+
|
371 |
+
// Runs the SCC's initialization if it has not completed yet.  The fast path
// is a single acquire load; InitSCCImpl handles every non-initialized state.
inline void InitSCC(SCCInfoBase* scc) {
  const int state = scc->visit_status.load(std::memory_order_acquire);
  if (GOOGLE_PREDICT_FALSE(state != SCCInfoBase::kInitialized)) {
    InitSCCImpl(scc);
  }
}
|
375 |
+
|
376 |
+
LIBPROTOBUF_EXPORT void DestroyMessage(const void* message);
|
377 |
+
LIBPROTOBUF_EXPORT void DestroyString(const void* s);
|
378 |
+
// Destroy (not delete) the message: registers `ptr` with OnShutdownRun so
// DestroyMessage is invoked on it at shutdown (destructor only, no free).
inline void OnShutdownDestroyMessage(const void* ptr) {
  OnShutdownRun(DestroyMessage, ptr);
}
|
382 |
+
// Destroy the string (call string destructor) at shutdown, via OnShutdownRun.
inline void OnShutdownDestroyString(const std::string* ptr) {
  OnShutdownRun(DestroyString, ptr);
}
|
386 |
+
|
387 |
+
} // namespace internal
|
388 |
+
} // namespace protobuf
|
389 |
+
|
390 |
+
} // namespace google
|
391 |
+
#endif // GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/has_bits.h
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_HAS_BITS_H__
|
32 |
+
#define GOOGLE_PROTOBUF_HAS_BITS_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/stubs/common.h>
|
35 |
+
#include <google/protobuf/stubs/port.h>
|
36 |
+
|
37 |
+
namespace google {
|
38 |
+
namespace protobuf {
|
39 |
+
namespace internal {
|
40 |
+
|
41 |
+
// Fixed-size array of 32-bit has-bit words used by generated messages to
// track which fields are present.  `doublewords` is the number of uint32
// words in the array.
template<size_t doublewords>
class HasBits {
 public:
  // Starts with all bits cleared.
  HasBits() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE { Clear(); }

  // Clears every has-bit word.
  void Clear() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    memset(has_bits_, 0, sizeof(has_bits_));
  }

  // Mutable access to the index-th 32-bit word.
  ::google::protobuf::uint32& operator[](int index) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    return has_bits_[index];
  }

  // Read-only access to the index-th 32-bit word.
  const ::google::protobuf::uint32& operator[](int index) const
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    return has_bits_[index];
  }

  // Word-wise equality via memcmp.
  bool operator==(const HasBits<doublewords>& rhs) const {
    return memcmp(has_bits_, rhs.has_bits_, sizeof(has_bits_)) == 0;
  }

  bool operator!=(const HasBits<doublewords>& rhs) const {
    return !(*this == rhs);
  }

  // True if no bit is set; specialized below for small word counts.
  bool empty() const;

 private:
  ::google::protobuf::uint32 has_bits_[doublewords];
};
|
72 |
+
|
73 |
+
template <>
|
74 |
+
inline bool HasBits<1>::empty() const {
|
75 |
+
return !has_bits_[0];
|
76 |
+
}
|
77 |
+
|
78 |
+
template <>
|
79 |
+
inline bool HasBits<2>::empty() const {
|
80 |
+
return !(has_bits_[0] | has_bits_[1]);
|
81 |
+
}
|
82 |
+
|
83 |
+
template <>
|
84 |
+
inline bool HasBits<3>::empty() const {
|
85 |
+
return !(has_bits_[0] | has_bits_[1] | has_bits_[2]);
|
86 |
+
}
|
87 |
+
|
88 |
+
template <>
|
89 |
+
inline bool HasBits<4>::empty() const {
|
90 |
+
return !(has_bits_[0] | has_bits_[1] | has_bits_[2] | has_bits_[3]);
|
91 |
+
}
|
92 |
+
|
93 |
+
template <size_t doublewords>
|
94 |
+
inline bool HasBits<doublewords>::empty() const {
|
95 |
+
for (size_t i = 0; i < doublewords; ++i) {
|
96 |
+
if (has_bits_[i]) return false;
|
97 |
+
}
|
98 |
+
return true;
|
99 |
+
}
|
100 |
+
|
101 |
+
} // namespace internal
|
102 |
+
} // namespace protobuf
|
103 |
+
|
104 |
+
} // namespace google
|
105 |
+
#endif // GOOGLE_PROTOBUF_HAS_BITS_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/implicit_weak_message.h
ADDED
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__
|
32 |
+
#define GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/io/coded_stream.h>
|
35 |
+
#include <google/protobuf/arena.h>
|
36 |
+
#include <google/protobuf/message_lite.h>
|
37 |
+
|
38 |
+
// This file is logically internal-only and should only be used by protobuf
|
39 |
+
// generated code.
|
40 |
+
|
41 |
+
namespace google {
|
42 |
+
namespace protobuf {
|
43 |
+
namespace internal {
|
44 |
+
|
45 |
+
// An implementation of MessageLite that treats all data as unknown. This type
// acts as a placeholder for an implicit weak field in the case where the true
// message type does not get linked into the binary.
class LIBPROTOBUF_EXPORT ImplicitWeakMessage : public MessageLite {
 public:
  ImplicitWeakMessage() : arena_(NULL) {}
  explicit ImplicitWeakMessage(Arena* arena) : arena_(arena) {}

  // Returns the singleton default instance (defined out of line).
  static const ImplicitWeakMessage* default_instance();

  // The real message type is unknown, so no type name can be reported.
  string GetTypeName() const { return ""; }

  MessageLite* New() const { return new ImplicitWeakMessage; }
  MessageLite* New(Arena* arena) const {
    return Arena::CreateMessage<ImplicitWeakMessage>(arena);
  }

  Arena* GetArena() const { return arena_; }

  // Drops the buffered payload.
  void Clear() { data_.clear(); }

  // No fields are known, so nothing can be reported as missing.
  bool IsInitialized() const { return true; }

  // Merging concatenates the raw serialized payloads. `other` must actually
  // be an ImplicitWeakMessage (the cast is unchecked).
  void CheckTypeAndMergeFrom(const MessageLite& other) {
    data_.append(static_cast<const ImplicitWeakMessage&>(other).data_);
  }

  bool MergePartialFromCodedStream(io::CodedInputStream* input);

  // The message is kept in serialized form, so its byte size is simply the
  // length of the buffered payload.
  size_t ByteSizeLong() const { return data_.size(); }

  // Serialization writes the buffered bytes back out verbatim.
  void SerializeWithCachedSizes(io::CodedOutputStream* output) const {
    output->WriteString(data_);
  }

  int GetCachedSize() const { return static_cast<int>(data_.size()); }

  // Marker typedef; presumably consulted by the Arena machinery used in
  // New(Arena*) above -- confirm against arena.h.
  typedef void InternalArenaConstructable_;

 private:
  Arena* const arena_;
  string data_;  // raw, unparsed wire-format payload
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ImplicitWeakMessage);
};
|
89 |
+
|
90 |
+
// A type handler for use with implicit weak repeated message fields.
// All operations go through the MessageLite interface so that the concrete
// message type is never referenced directly by this handler.
template <typename ImplicitWeakType>
class ImplicitWeakTypeHandler {
 public:
  typedef ImplicitWeakType Type;
  typedef ::google::protobuf::MessageLite WeakType;
  static const bool Moveable = false;

  // With implicit weak fields, we need separate NewFromPrototype and
  // NewFromPrototypeWeak functions. The former is used when we want to create a
  // strong dependency on the message type, and it just delegates to the
  // GenericTypeHandler. The latter avoids creating a strong dependency, by
  // simply calling MessageLite::New.
  static inline ::google::protobuf::MessageLite* NewFromPrototype(
      const ::google::protobuf::MessageLite* prototype, ::google::protobuf::Arena* arena = NULL) {
    return prototype->New(arena);
  }

  // Heap-allocated values are deleted here; arena-allocated values are owned
  // by the arena, so nothing is done when an arena is present.
  static inline void Delete(::google::protobuf::MessageLite* value, Arena* arena) {
    if (arena == NULL) {
      delete value;
    }
  }
  static inline ::google::protobuf::Arena* GetArena(::google::protobuf::MessageLite* value) {
    return value->GetArena();
  }
  static inline void* GetMaybeArenaPointer(::google::protobuf::MessageLite* value) {
    return value->GetArena();
  }
  static inline void Clear(::google::protobuf::MessageLite* value) {
    value->Clear();
  }
  // Dispatches through the virtual merge entry point so the true (possibly
  // unlinked) message type is not named here.
  static void Merge(const ::google::protobuf::MessageLite& from,
                    ::google::protobuf::MessageLite* to) {
    to->CheckTypeAndMergeFrom(from);
  }
  static inline size_t SpaceUsedLong(const Type& value) {
    return value.SpaceUsedLong();
  }
};
|
130 |
+
|
131 |
+
} // namespace internal
|
132 |
+
} // namespace protobuf
|
133 |
+
|
134 |
+
} // namespace google
|
135 |
+
#endif // GOOGLE_PROTOBUF_IMPLICIT_WEAK_MESSAGE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/inlined_string_field.h
ADDED
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__
|
32 |
+
#define GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__
|
33 |
+
|
34 |
+
#include <string>
|
35 |
+
|
36 |
+
#include <google/protobuf/stubs/port.h>
|
37 |
+
#include <google/protobuf/stubs/stringpiece.h>
|
38 |
+
|
39 |
+
namespace google {
|
40 |
+
namespace protobuf {
|
41 |
+
|
42 |
+
class Arena;
|
43 |
+
|
44 |
+
namespace internal {
|
45 |
+
|
46 |
+
// InlinedStringField wraps a ::std::string instance and exposes an API similar to
// ArenaStringPtr's wrapping of a ::std::string* instance. As ::std::string is never
// allocated on the Arena, we expose only the *NoArena methods of
// ArenaStringPtr.
//
// default_value parameters are taken for consistency with ArenaStringPtr, but
// are not used for most methods. With inlining, these should be removed from
// the generated binary.
class LIBPROTOBUF_EXPORT InlinedStringField {
 public:
  InlinedStringField()
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;
  explicit InlinedStringField(const ::std::string& default_value);

  // Copies the contents of |from|; |default_value| is ignored.
  void AssignWithDefault(const ::std::string* default_value,
                         const InlinedStringField& from)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  // Every arena-taking overload below ignores the arena and forwards to the
  // corresponding *NoArena method, since the wrapped string never lives on
  // an Arena.
  void ClearToEmpty(const ::std::string* default_value, Arena* arena)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    ClearToEmptyNoArena(default_value);
  }
  void ClearNonDefaultToEmpty() GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    ClearNonDefaultToEmptyNoArena();
  }
  void ClearToEmptyNoArena(const ::std::string* default_value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    ClearNonDefaultToEmptyNoArena();
  }
  void ClearNonDefaultToEmptyNoArena()
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  void ClearToDefault(const ::std::string* default_value, Arena* arena)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    ClearToDefaultNoArena(default_value);
  }
  void ClearToDefaultNoArena(const ::std::string* default_value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  void Destroy(const ::std::string* default_value, Arena* arena)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    DestroyNoArena(default_value);
  }
  void DestroyNoArena(const ::std::string* default_value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  // Read access to the wrapped string.
  const ::std::string& Get() const GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    return GetNoArena();
  }
  const ::std::string& GetNoArena() const GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  // Mutable access to the wrapped string.
  ::std::string* Mutable(const ::std::string* default_value, Arena* arena)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    return MutableNoArena(default_value);
  }
  ::std::string* MutableNoArena(const ::std::string* default_value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  // Release variants hand ownership of the current contents to the caller
  // (as a heap-allocated string); all of them funnel into
  // ReleaseNonDefaultNoArena().
  ::std::string* Release(const ::std::string* default_value, Arena* arena) {
    return ReleaseNoArena(default_value);
  }
  ::std::string* ReleaseNonDefault(const ::std::string* default_value, Arena* arena) {
    return ReleaseNonDefaultNoArena(default_value);
  }
  ::std::string* ReleaseNoArena(const ::std::string* default_value) {
    return ReleaseNonDefaultNoArena(default_value);
  }
  ::std::string* ReleaseNonDefaultNoArena(const ::std::string* default_value);

  // Setters from a StringPiece.
  void Set(const ::std::string* default_value,
           StringPiece value,
           Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    SetNoArena(default_value, value);
  }
  void SetLite(const ::std::string* default_value,
               StringPiece value,
               Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    SetNoArena(default_value, value);
  }
  void SetNoArena(const ::std::string* default_value,
                  StringPiece value) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

  // Setters from a ::std::string.
  void Set(const ::std::string* default_value,
           const ::std::string& value,
           Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    SetNoArena(default_value, value);
  }
  void SetLite(const ::std::string* default_value,
               const ::std::string& value,
               Arena* arena) GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE {
    SetNoArena(default_value, value);
  }
  void SetNoArena(const ::std::string* default_value,
                  const ::std::string& value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;

#if LANG_CXX11
  // Move-setter, only available when built as C++11 or later.
  void SetNoArena(const ::std::string* default_value,
                  ::std::string&& value)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;
#endif
  // Takes ownership of |value| (see SetAllocatedNoArena's definition).
  void SetAllocated(const ::std::string* default_value,
                    ::std::string* value,
                    Arena* arena) {
    SetAllocatedNoArena(default_value, value);
  }
  void SetAllocatedNoArena(const ::std::string* default_value,
                           ::std::string* value);
  void Swap(InlinedStringField* from)
      GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE;
  ::std::string* UnsafeMutablePointer();
  void UnsafeSetDefault(const ::std::string* default_value);
  ::std::string* UnsafeArenaRelease(const ::std::string* default_value, Arena* arena);
  void UnsafeArenaSetAllocated(
      const ::std::string* default_value, ::std::string* value, Arena* arena);

  // An inlined string always has storage of its own, so it is never treated
  // as being in the "default" state.
  bool IsDefault(const ::std::string* default_value) {
    return false;
  }
 private:
  ::std::string value_;  // the inlined storage itself
};
|
168 |
+
|
169 |
+
inline InlinedStringField::InlinedStringField() {}

// Initializes the inlined storage with a copy of the default value.
inline InlinedStringField::InlinedStringField(const ::std::string& default_value) :
    value_(default_value) {}

// Plain copy-assignment from another field; the default is not consulted.
inline void InlinedStringField::AssignWithDefault(
    const ::std::string* default_value, const InlinedStringField& from) {
  value_ = from.value_;
}

inline const ::std::string& InlinedStringField::GetNoArena() const {
  return value_;
}

inline ::std::string* InlinedStringField::MutableNoArena(const ::std::string*) {
  return &value_;
}

// Takes ownership of |value|: its contents are moved (or swapped, pre-C++11)
// into the inlined storage and |value| itself is deleted. A NULL |value|
// resets the field to the default.
inline void InlinedStringField::SetAllocatedNoArena(
    const ::std::string* default_value, ::std::string* value) {
  if (value == NULL) {
    value_.assign(*default_value);
  } else {
#if LANG_CXX11
    value_.assign(std::move(*value));
#else
    value_.swap(*value);
#endif
    delete value;
  }
}

inline void InlinedStringField::DestroyNoArena(const ::std::string*) {
  // This is invoked from the generated message's ArenaDtor, which is used to
  // clean up objects not allocated on the Arena.
  this->~InlinedStringField();
}

inline void InlinedStringField::ClearNonDefaultToEmptyNoArena() {
  value_.clear();
}

inline void InlinedStringField::ClearToDefaultNoArena(
    const ::std::string* default_value) {
  value_.assign(*default_value);
}

// Moves the current contents into a freshly heap-allocated string that the
// caller owns, and leaves the field holding a copy of the default value
// (the swap exchanges the two).
inline ::std::string* InlinedStringField::ReleaseNonDefaultNoArena(
    const ::std::string* default_value) {
  ::std::string* released = new ::std::string(*default_value);
  value_.swap(*released);
  return released;
}

inline void InlinedStringField::SetNoArena(
    const ::std::string* default_value, StringPiece value) {
  value_.assign(value.data(), value.length());
}

inline void InlinedStringField::SetNoArena(
    const ::std::string* default_value, const ::std::string& value) {
  value_.assign(value);
}

#if LANG_CXX11
inline void InlinedStringField::SetNoArena(
    const ::std::string* default_value, ::std::string&& value) {
  value_.assign(std::move(value));
}
#endif

inline void InlinedStringField::Swap(InlinedStringField* from) {
  value_.swap(from->value_);
}

inline ::std::string* InlinedStringField::UnsafeMutablePointer() {
  return &value_;
}

inline void InlinedStringField::UnsafeSetDefault(
    const ::std::string* default_value) {
  value_.assign(*default_value);
}

// The arena is ignored; behaves exactly like ReleaseNoArena.
inline ::std::string* InlinedStringField::UnsafeArenaRelease(
    const ::std::string* default_value, Arena* arena) {
  return ReleaseNoArena(default_value);
}

// NOTE(review): unlike SetAllocatedNoArena, this copies from |value| and does
// NOT delete it -- |value| remains owned by the caller (or an arena).
inline void InlinedStringField::UnsafeArenaSetAllocated(
    const ::std::string* default_value, ::std::string* value, Arena* arena) {
  if (value == NULL) {
    value_.assign(*default_value);
  } else {
    value_.assign(*value);
  }
}
|
266 |
+
|
267 |
+
} // namespace internal
|
268 |
+
} // namespace protobuf
|
269 |
+
|
270 |
+
} // namespace google
|
271 |
+
#endif // GOOGLE_PROTOBUF_INLINED_STRING_FIELD_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream.h
ADDED
@@ -0,0 +1,1400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This file contains the CodedInputStream and CodedOutputStream classes,
|
36 |
+
// which wrap a ZeroCopyInputStream or ZeroCopyOutputStream, respectively,
|
37 |
+
// and allow you to read or write individual pieces of data in various
|
38 |
+
// formats. In particular, these implement the varint encoding for
|
39 |
+
// integers, a simple variable-length encoding in which smaller numbers
|
40 |
+
// take fewer bytes.
|
41 |
+
//
|
42 |
+
// Typically these classes will only be used internally by the protocol
|
43 |
+
// buffer library in order to encode and decode protocol buffers. Clients
|
44 |
+
// of the library only need to know about this class if they wish to write
|
45 |
+
// custom message parsing or serialization procedures.
|
46 |
+
//
|
47 |
+
// CodedOutputStream example:
|
48 |
+
// // Write some data to "myfile". First we write a 4-byte "magic number"
|
49 |
+
// // to identify the file type, then write a length-delimited string. The
|
50 |
+
// // string is composed of a varint giving the length followed by the raw
|
51 |
+
// // bytes.
|
52 |
+
// int fd = open("myfile", O_CREAT | O_WRONLY);
|
53 |
+
// ZeroCopyOutputStream* raw_output = new FileOutputStream(fd);
|
54 |
+
// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
|
55 |
+
//
|
56 |
+
// int magic_number = 1234;
|
57 |
+
// char text[] = "Hello world!";
|
58 |
+
// coded_output->WriteLittleEndian32(magic_number);
|
59 |
+
// coded_output->WriteVarint32(strlen(text));
|
60 |
+
// coded_output->WriteRaw(text, strlen(text));
|
61 |
+
//
|
62 |
+
// delete coded_output;
|
63 |
+
// delete raw_output;
|
64 |
+
// close(fd);
|
65 |
+
//
|
66 |
+
// CodedInputStream example:
|
67 |
+
// // Read a file created by the above code.
|
68 |
+
// int fd = open("myfile", O_RDONLY);
|
69 |
+
// ZeroCopyInputStream* raw_input = new FileInputStream(fd);
|
70 |
+
// CodedInputStream coded_input = new CodedInputStream(raw_input);
|
71 |
+
//
|
72 |
+
// coded_input->ReadLittleEndian32(&magic_number);
|
73 |
+
// if (magic_number != 1234) {
|
74 |
+
// cerr << "File not in expected format." << endl;
|
75 |
+
// return;
|
76 |
+
// }
|
77 |
+
//
|
78 |
+
// uint32 size;
|
79 |
+
// coded_input->ReadVarint32(&size);
|
80 |
+
//
|
81 |
+
// char* text = new char[size + 1];
|
82 |
+
// coded_input->ReadRaw(buffer, size);
|
83 |
+
// text[size] = '\0';
|
84 |
+
//
|
85 |
+
// delete coded_input;
|
86 |
+
// delete raw_input;
|
87 |
+
// close(fd);
|
88 |
+
//
|
89 |
+
// cout << "Text is: " << text << endl;
|
90 |
+
// delete [] text;
|
91 |
+
//
|
92 |
+
// For those who are interested, varint encoding is defined as follows:
|
93 |
+
//
|
94 |
+
// The encoding operates on unsigned integers of up to 64 bits in length.
|
95 |
+
// Each byte of the encoded value has the format:
|
96 |
+
// * bits 0-6: Seven bits of the number being encoded.
|
97 |
+
// * bit 7: Zero if this is the last byte in the encoding (in which
|
98 |
+
// case all remaining bits of the number are zero) or 1 if
|
99 |
+
// more bytes follow.
|
100 |
+
// The first byte contains the least-significant 7 bits of the number, the
|
101 |
+
// second byte (if present) contains the next-least-significant 7 bits,
|
102 |
+
// and so on. So, the binary number 1011000101011 would be encoded in two
|
103 |
+
// bytes as "10101011 00101100".
|
104 |
+
//
|
105 |
+
// In theory, varint could be used to encode integers of any length.
|
106 |
+
// However, for practicality we set a limit at 64 bits. The maximum encoded
|
107 |
+
// length of a number is thus 10 bytes.
|
108 |
+
|
109 |
+
#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
|
110 |
+
#define GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
|
111 |
+
|
112 |
+
#include <assert.h>
|
113 |
+
#include <atomic>
|
114 |
+
#include <climits>
|
115 |
+
#include <string>
|
116 |
+
#include <utility>
|
117 |
+
#ifdef _MSC_VER
|
118 |
+
// Assuming windows is always little-endian.
|
119 |
+
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
|
120 |
+
#define PROTOBUF_LITTLE_ENDIAN 1
|
121 |
+
#endif
|
122 |
+
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
|
123 |
+
// If MSVC has "/RTCc" set, it will complain about truncating casts at
|
124 |
+
// runtime. This file contains some intentional truncating casts.
|
125 |
+
#pragma runtime_checks("c", off)
|
126 |
+
#endif
|
127 |
+
#else
|
128 |
+
#include <sys/param.h> // __BYTE_ORDER
|
129 |
+
#if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
|
130 |
+
(defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \
|
131 |
+
!defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
|
132 |
+
#define PROTOBUF_LITTLE_ENDIAN 1
|
133 |
+
#endif
|
134 |
+
#endif
|
135 |
+
#include <google/protobuf/stubs/common.h>
|
136 |
+
#include <google/protobuf/stubs/port.h>
|
137 |
+
#include <google/protobuf/stubs/port.h>
|
138 |
+
|
139 |
+
namespace google {
|
140 |
+
|
141 |
+
namespace protobuf {
|
142 |
+
|
143 |
+
class DescriptorPool;
|
144 |
+
class MessageFactory;
|
145 |
+
|
146 |
+
namespace internal { void MapTestForceDeterministic(); }
|
147 |
+
|
148 |
+
namespace io {
|
149 |
+
|
150 |
+
// Defined in this file.
|
151 |
+
class CodedInputStream;
|
152 |
+
class CodedOutputStream;
|
153 |
+
|
154 |
+
// Defined in other files.
|
155 |
+
class ZeroCopyInputStream; // zero_copy_stream.h
|
156 |
+
class ZeroCopyOutputStream; // zero_copy_stream.h
|
157 |
+
|
158 |
+
// Class which reads and decodes binary data which is composed of varint-
|
159 |
+
// encoded integers and fixed-width pieces. Wraps a ZeroCopyInputStream.
|
160 |
+
// Most users will not need to deal with CodedInputStream.
|
161 |
+
//
|
162 |
+
// Most methods of CodedInputStream that return a bool return false if an
|
163 |
+
// underlying I/O error occurs or if the data is malformed. Once such a
|
164 |
+
// failure occurs, the CodedInputStream is broken and is no longer useful.
|
165 |
+
class LIBPROTOBUF_EXPORT CodedInputStream {
|
166 |
+
public:
|
167 |
+
// Create a CodedInputStream that reads from the given ZeroCopyInputStream.
|
168 |
+
explicit CodedInputStream(ZeroCopyInputStream* input);
|
169 |
+
|
170 |
+
// Create a CodedInputStream that reads from the given flat array. This is
|
171 |
+
// faster than using an ArrayInputStream. PushLimit(size) is implied by
|
172 |
+
// this constructor.
|
173 |
+
explicit CodedInputStream(const uint8* buffer, int size);
|
174 |
+
|
175 |
+
// Destroy the CodedInputStream and position the underlying
|
176 |
+
// ZeroCopyInputStream at the first unread byte. If an error occurred while
|
177 |
+
// reading (causing a method to return false), then the exact position of
|
178 |
+
// the input stream may be anywhere between the last value that was read
|
179 |
+
// successfully and the stream's byte limit.
|
180 |
+
~CodedInputStream();
|
181 |
+
|
182 |
+
// Return true if this CodedInputStream reads from a flat array instead of
|
183 |
+
// a ZeroCopyInputStream.
|
184 |
+
inline bool IsFlat() const;
|
185 |
+
|
186 |
+
// Skips a number of bytes. Returns false if an underlying read error
|
187 |
+
// occurs.
|
188 |
+
inline bool Skip(int count);
|
189 |
+
|
190 |
+
// Sets *data to point directly at the unread part of the CodedInputStream's
|
191 |
+
// underlying buffer, and *size to the size of that buffer, but does not
|
192 |
+
// advance the stream's current position. This will always either produce
|
193 |
+
// a non-empty buffer or return false. If the caller consumes any of
|
194 |
+
// this data, it should then call Skip() to skip over the consumed bytes.
|
195 |
+
// This may be useful for implementing external fast parsing routines for
|
196 |
+
// types of data not covered by the CodedInputStream interface.
|
197 |
+
bool GetDirectBufferPointer(const void** data, int* size);
|
198 |
+
|
199 |
+
// Like GetDirectBufferPointer, but this method is inlined, and does not
|
200 |
+
// attempt to Refresh() if the buffer is currently empty.
|
201 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
202 |
+
void GetDirectBufferPointerInline(const void** data, int* size);
|
203 |
+
|
204 |
+
// Read raw bytes, copying them into the given buffer.
|
205 |
+
bool ReadRaw(void* buffer, int size);
|
206 |
+
|
207 |
+
// Like the above, with inlined optimizations. This should only be used
|
208 |
+
// by the protobuf implementation.
|
209 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
210 |
+
bool InternalReadRawInline(void* buffer, int size);
|
211 |
+
|
212 |
+
// Like ReadRaw, but reads into a string.
|
213 |
+
bool ReadString(string* buffer, int size);
|
214 |
+
// Like the above, with inlined optimizations. This should only be used
|
215 |
+
// by the protobuf implementation.
|
216 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
217 |
+
bool InternalReadStringInline(string* buffer, int size);
|
218 |
+
|
219 |
+
|
220 |
+
// Read a 32-bit little-endian integer.
|
221 |
+
bool ReadLittleEndian32(uint32* value);
|
222 |
+
// Read a 64-bit little-endian integer.
|
223 |
+
bool ReadLittleEndian64(uint64* value);
|
224 |
+
|
225 |
+
// These methods read from an externally provided buffer. The caller is
|
226 |
+
// responsible for ensuring that the buffer has sufficient space.
|
227 |
+
// Read a 32-bit little-endian integer.
|
228 |
+
static const uint8* ReadLittleEndian32FromArray(const uint8* buffer,
|
229 |
+
uint32* value);
|
230 |
+
// Read a 64-bit little-endian integer.
|
231 |
+
static const uint8* ReadLittleEndian64FromArray(const uint8* buffer,
|
232 |
+
uint64* value);
|
233 |
+
|
234 |
+
// Read an unsigned integer with Varint encoding, truncating to 32 bits.
|
235 |
+
// Reading a 32-bit value is equivalent to reading a 64-bit one and casting
|
236 |
+
// it to uint32, but may be more efficient.
|
237 |
+
bool ReadVarint32(uint32* value);
|
238 |
+
// Read an unsigned integer with Varint encoding.
|
239 |
+
bool ReadVarint64(uint64* value);
|
240 |
+
|
241 |
+
// Reads a varint off the wire into an "int". This should be used for reading
|
242 |
+
// sizes off the wire (sizes of strings, submessages, bytes fields, etc).
|
243 |
+
//
|
244 |
+
// The value from the wire is interpreted as unsigned. If its value exceeds
|
245 |
+
// the representable value of an integer on this platform, instead of
|
246 |
+
// truncating we return false. Truncating (as performed by ReadVarint32()
|
247 |
+
// above) is an acceptable approach for fields representing an integer, but
|
248 |
+
// when we are parsing a size from the wire, truncating the value would result
|
249 |
+
// in us misparsing the payload.
|
250 |
+
bool ReadVarintSizeAsInt(int* value);
|
251 |
+
|
252 |
+
// Read a tag. This calls ReadVarint32() and returns the result, or returns
|
253 |
+
// zero (which is not a valid tag) if ReadVarint32() fails. Also, ReadTag
|
254 |
+
// (but not ReadTagNoLastTag) updates the last tag value, which can be checked
|
255 |
+
// with LastTagWas().
|
256 |
+
//
|
257 |
+
// Always inline because this is only called in one place per parse loop
|
258 |
+
// but it is called for every iteration of said loop, so it should be fast.
|
259 |
+
// GCC doesn't want to inline this by default.
|
260 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE uint32 ReadTag() {
|
261 |
+
return last_tag_ = ReadTagNoLastTag();
|
262 |
+
}
|
263 |
+
|
264 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE uint32 ReadTagNoLastTag();
|
265 |
+
|
266 |
+
|
267 |
+
// This usually a faster alternative to ReadTag() when cutoff is a manifest
|
268 |
+
// constant. It does particularly well for cutoff >= 127. The first part
|
269 |
+
// of the return value is the tag that was read, though it can also be 0 in
|
270 |
+
// the cases where ReadTag() would return 0. If the second part is true
|
271 |
+
// then the tag is known to be in [0, cutoff]. If not, the tag either is
|
272 |
+
// above cutoff or is 0. (There's intentional wiggle room when tag is 0,
|
273 |
+
// because that can arise in several ways, and for best performance we want
|
274 |
+
// to avoid an extra "is tag == 0?" check here.)
|
275 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
276 |
+
std::pair<uint32, bool> ReadTagWithCutoff(uint32 cutoff) {
|
277 |
+
std::pair<uint32, bool> result = ReadTagWithCutoffNoLastTag(cutoff);
|
278 |
+
last_tag_ = result.first;
|
279 |
+
return result;
|
280 |
+
}
|
281 |
+
|
282 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
283 |
+
std::pair<uint32, bool> ReadTagWithCutoffNoLastTag(uint32 cutoff);
|
284 |
+
|
285 |
+
// Usually returns true if calling ReadVarint32() now would produce the given
|
286 |
+
// value. Will always return false if ReadVarint32() would not return the
|
287 |
+
// given value. If ExpectTag() returns true, it also advances past
|
288 |
+
// the varint. For best performance, use a compile-time constant as the
|
289 |
+
// parameter.
|
290 |
+
// Always inline because this collapses to a small number of instructions
|
291 |
+
// when given a constant parameter, but GCC doesn't want to inline by default.
|
292 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool ExpectTag(uint32 expected);
|
293 |
+
|
294 |
+
// Like above, except this reads from the specified buffer. The caller is
|
295 |
+
// responsible for ensuring that the buffer is large enough to read a varint
|
296 |
+
// of the expected size. For best performance, use a compile-time constant as
|
297 |
+
// the expected tag parameter.
|
298 |
+
//
|
299 |
+
// Returns a pointer beyond the expected tag if it was found, or NULL if it
|
300 |
+
// was not.
|
301 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
302 |
+
static const uint8* ExpectTagFromArray(const uint8* buffer, uint32 expected);
|
303 |
+
|
304 |
+
// Usually returns true if no more bytes can be read. Always returns false
|
305 |
+
// if more bytes can be read. If ExpectAtEnd() returns true, a subsequent
|
306 |
+
// call to LastTagWas() will act as if ReadTag() had been called and returned
|
307 |
+
// zero, and ConsumedEntireMessage() will return true.
|
308 |
+
bool ExpectAtEnd();
|
309 |
+
|
310 |
+
// If the last call to ReadTag() or ReadTagWithCutoff() returned the given
|
311 |
+
// value, returns true. Otherwise, returns false.
|
312 |
+
// ReadTagNoLastTag/ReadTagWithCutoffNoLastTag do not preserve the last
|
313 |
+
// returned value.
|
314 |
+
//
|
315 |
+
// This is needed because parsers for some types of embedded messages
|
316 |
+
// (with field type TYPE_GROUP) don't actually know that they've reached the
|
317 |
+
// end of a message until they see an ENDGROUP tag, which was actually part
|
318 |
+
// of the enclosing message. The enclosing message would like to check that
|
319 |
+
// tag to make sure it had the right number, so it calls LastTagWas() on
|
320 |
+
// return from the embedded parser to check.
|
321 |
+
bool LastTagWas(uint32 expected);
|
322 |
+
void SetLastTag(uint32 tag) { last_tag_ = tag; }
|
323 |
+
|
324 |
+
// When parsing message (but NOT a group), this method must be called
|
325 |
+
// immediately after MergeFromCodedStream() returns (if it returns true)
|
326 |
+
// to further verify that the message ended in a legitimate way. For
|
327 |
+
// example, this verifies that parsing did not end on an end-group tag.
|
328 |
+
// It also checks for some cases where, due to optimizations,
|
329 |
+
// MergeFromCodedStream() can incorrectly return true.
|
330 |
+
bool ConsumedEntireMessage();
|
331 |
+
|
332 |
+
// Limits ----------------------------------------------------------
|
333 |
+
// Limits are used when parsing length-delimited embedded messages.
|
334 |
+
// After the message's length is read, PushLimit() is used to prevent
|
335 |
+
// the CodedInputStream from reading beyond that length. Once the
|
336 |
+
// embedded message has been parsed, PopLimit() is called to undo the
|
337 |
+
// limit.
|
338 |
+
|
339 |
+
// Opaque type used with PushLimit() and PopLimit(). Do not modify
|
340 |
+
// values of this type yourself. The only reason that this isn't a
|
341 |
+
// struct with private internals is for efficiency.
|
342 |
+
typedef int Limit;
|
343 |
+
|
344 |
+
// Places a limit on the number of bytes that the stream may read,
|
345 |
+
// starting from the current position. Once the stream hits this limit,
|
346 |
+
// it will act like the end of the input has been reached until PopLimit()
|
347 |
+
// is called.
|
348 |
+
//
|
349 |
+
// As the names imply, the stream conceptually has a stack of limits. The
|
350 |
+
// shortest limit on the stack is always enforced, even if it is not the
|
351 |
+
// top limit.
|
352 |
+
//
|
353 |
+
// The value returned by PushLimit() is opaque to the caller, and must
|
354 |
+
// be passed unchanged to the corresponding call to PopLimit().
|
355 |
+
Limit PushLimit(int byte_limit);
|
356 |
+
|
357 |
+
// Pops the last limit pushed by PushLimit(). The input must be the value
|
358 |
+
// returned by that call to PushLimit().
|
359 |
+
void PopLimit(Limit limit);
|
360 |
+
|
361 |
+
// Returns the number of bytes left until the nearest limit on the
|
362 |
+
// stack is hit, or -1 if no limits are in place.
|
363 |
+
int BytesUntilLimit() const;
|
364 |
+
|
365 |
+
// Returns current position relative to the beginning of the input stream.
|
366 |
+
int CurrentPosition() const;
|
367 |
+
|
368 |
+
// Total Bytes Limit -----------------------------------------------
|
369 |
+
// To prevent malicious users from sending excessively large messages
|
370 |
+
// and causing memory exhaustion, CodedInputStream imposes a hard limit on
|
371 |
+
// the total number of bytes it will read.
|
372 |
+
|
373 |
+
// Sets the maximum number of bytes that this CodedInputStream will read
|
374 |
+
// before refusing to continue. To prevent servers from allocating enormous
|
375 |
+
// amounts of memory to hold parsed messages, the maximum message length
|
376 |
+
// should be limited to the shortest length that will not harm usability.
|
377 |
+
// The default limit is INT_MAX (~2GB) and apps should set shorter limits
|
378 |
+
// if possible. An error will always be printed to stderr if the limit is
|
379 |
+
// reached.
|
380 |
+
//
|
381 |
+
// Note: setting a limit less than the current read position is interpreted
|
382 |
+
// as a limit on the current position.
|
383 |
+
//
|
384 |
+
// This is unrelated to PushLimit()/PopLimit().
|
385 |
+
void SetTotalBytesLimit(int total_bytes_limit);
|
386 |
+
|
387 |
+
PROTOBUF_RUNTIME_DEPRECATED(
|
388 |
+
"Please use the single parameter version of SetTotalBytesLimit(). The "
|
389 |
+
"second parameter is ignored.")
|
390 |
+
void SetTotalBytesLimit(int total_bytes_limit, int) {
|
391 |
+
SetTotalBytesLimit(total_bytes_limit);
|
392 |
+
}
|
393 |
+
|
394 |
+
// The Total Bytes Limit minus the Current Position, or -1 if the total bytes
|
395 |
+
// limit is INT_MAX.
|
396 |
+
int BytesUntilTotalBytesLimit() const;
|
397 |
+
|
398 |
+
// Recursion Limit -------------------------------------------------
|
399 |
+
// To prevent corrupt or malicious messages from causing stack overflows,
|
400 |
+
// we must keep track of the depth of recursion when parsing embedded
|
401 |
+
// messages and groups. CodedInputStream keeps track of this because it
|
402 |
+
// is the only object that is passed down the stack during parsing.
|
403 |
+
|
404 |
+
// Sets the maximum recursion depth. The default is 100.
|
405 |
+
void SetRecursionLimit(int limit);
|
406 |
+
|
407 |
+
|
408 |
+
// Increments the current recursion depth. Returns true if the depth is
|
409 |
+
// under the limit, false if it has gone over.
|
410 |
+
bool IncrementRecursionDepth();
|
411 |
+
|
412 |
+
// Decrements the recursion depth if possible.
|
413 |
+
void DecrementRecursionDepth();
|
414 |
+
|
415 |
+
// Decrements the recursion depth blindly. This is faster than
|
416 |
+
// DecrementRecursionDepth(). It should be used only if all previous
|
417 |
+
// increments to recursion depth were successful.
|
418 |
+
void UnsafeDecrementRecursionDepth();
|
419 |
+
|
420 |
+
// Shorthand for make_pair(PushLimit(byte_limit), --recursion_budget_).
|
421 |
+
// Using this can reduce code size and complexity in some cases. The caller
|
422 |
+
// is expected to check that the second part of the result is non-negative (to
|
423 |
+
// bail out if the depth of recursion is too high) and, if all is well, to
|
424 |
+
// later pass the first part of the result to PopLimit() or similar.
|
425 |
+
std::pair<CodedInputStream::Limit, int> IncrementRecursionDepthAndPushLimit(
|
426 |
+
int byte_limit);
|
427 |
+
|
428 |
+
// Shorthand for PushLimit(ReadVarint32(&length) ? length : 0).
|
429 |
+
Limit ReadLengthAndPushLimit();
|
430 |
+
|
431 |
+
// Helper that is equivalent to: {
|
432 |
+
// bool result = ConsumedEntireMessage();
|
433 |
+
// PopLimit(limit);
|
434 |
+
// UnsafeDecrementRecursionDepth();
|
435 |
+
// return result; }
|
436 |
+
// Using this can reduce code size and complexity in some cases.
|
437 |
+
// Do not use unless the current recursion depth is greater than zero.
|
438 |
+
bool DecrementRecursionDepthAndPopLimit(Limit limit);
|
439 |
+
|
440 |
+
// Helper that is equivalent to: {
|
441 |
+
// bool result = ConsumedEntireMessage();
|
442 |
+
// PopLimit(limit);
|
443 |
+
// return result; }
|
444 |
+
// Using this can reduce code size and complexity in some cases.
|
445 |
+
bool CheckEntireMessageConsumedAndPopLimit(Limit limit);
|
446 |
+
|
447 |
+
// Extension Registry ----------------------------------------------
|
448 |
+
// ADVANCED USAGE: 99.9% of people can ignore this section.
|
449 |
+
//
|
450 |
+
// By default, when parsing extensions, the parser looks for extension
|
451 |
+
// definitions in the pool which owns the outer message's Descriptor.
|
452 |
+
// However, you may call SetExtensionRegistry() to provide an alternative
|
453 |
+
// pool instead. This makes it possible, for example, to parse a message
|
454 |
+
// using a generated class, but represent some extensions using
|
455 |
+
// DynamicMessage.
|
456 |
+
|
457 |
+
// Set the pool used to look up extensions. Most users do not need to call
|
458 |
+
// this as the correct pool will be chosen automatically.
|
459 |
+
//
|
460 |
+
// WARNING: It is very easy to misuse this. Carefully read the requirements
|
461 |
+
// below. Do not use this unless you are sure you need it. Almost no one
|
462 |
+
// does.
|
463 |
+
//
|
464 |
+
// Let's say you are parsing a message into message object m, and you want
|
465 |
+
// to take advantage of SetExtensionRegistry(). You must follow these
|
466 |
+
// requirements:
|
467 |
+
//
|
468 |
+
// The given DescriptorPool must contain m->GetDescriptor(). It is not
|
469 |
+
// sufficient for it to simply contain a descriptor that has the same name
|
470 |
+
// and content -- it must be the *exact object*. In other words:
|
471 |
+
// assert(pool->FindMessageTypeByName(m->GetDescriptor()->full_name()) ==
|
472 |
+
// m->GetDescriptor());
|
473 |
+
// There are two ways to satisfy this requirement:
|
474 |
+
// 1) Use m->GetDescriptor()->pool() as the pool. This is generally useless
|
475 |
+
// because this is the pool that would be used anyway if you didn't call
|
476 |
+
// SetExtensionRegistry() at all.
|
477 |
+
// 2) Use a DescriptorPool which has m->GetDescriptor()->pool() as an
|
478 |
+
// "underlay". Read the documentation for DescriptorPool for more
|
479 |
+
// information about underlays.
|
480 |
+
//
|
481 |
+
// You must also provide a MessageFactory. This factory will be used to
|
482 |
+
// construct Message objects representing extensions. The factory's
|
483 |
+
// GetPrototype() MUST return non-NULL for any Descriptor which can be found
|
484 |
+
// through the provided pool.
|
485 |
+
//
|
486 |
+
// If the provided factory might return instances of protocol-compiler-
|
487 |
+
// generated (i.e. compiled-in) types, or if the outer message object m is
|
488 |
+
// a generated type, then the given factory MUST have this property: If
|
489 |
+
// GetPrototype() is given a Descriptor which resides in
|
490 |
+
// DescriptorPool::generated_pool(), the factory MUST return the same
|
491 |
+
// prototype which MessageFactory::generated_factory() would return. That
|
492 |
+
// is, given a descriptor for a generated type, the factory must return an
|
493 |
+
// instance of the generated class (NOT DynamicMessage). However, when
|
494 |
+
// given a descriptor for a type that is NOT in generated_pool, the factory
|
495 |
+
// is free to return any implementation.
|
496 |
+
//
|
497 |
+
// The reason for this requirement is that generated sub-objects may be
|
498 |
+
// accessed via the standard (non-reflection) extension accessor methods,
|
499 |
+
// and these methods will down-cast the object to the generated class type.
|
500 |
+
// If the object is not actually of that type, the results would be undefined.
|
501 |
+
// On the other hand, if an extension is not compiled in, then there is no
|
502 |
+
// way the code could end up accessing it via the standard accessors -- the
|
503 |
+
// only way to access the extension is via reflection. When using reflection,
|
504 |
+
// DynamicMessage and generated messages are indistinguishable, so it's fine
|
505 |
+
// if these objects are represented using DynamicMessage.
|
506 |
+
//
|
507 |
+
// Using DynamicMessageFactory on which you have called
|
508 |
+
// SetDelegateToGeneratedFactory(true) should be sufficient to satisfy the
|
509 |
+
// above requirement.
|
510 |
+
//
|
511 |
+
// If either pool or factory is NULL, both must be NULL.
|
512 |
+
//
|
513 |
+
// Note that this feature is ignored when parsing "lite" messages as they do
|
514 |
+
// not have descriptors.
|
515 |
+
void SetExtensionRegistry(const DescriptorPool* pool,
|
516 |
+
MessageFactory* factory);
|
517 |
+
|
518 |
+
// Get the DescriptorPool set via SetExtensionRegistry(), or NULL if no pool
|
519 |
+
// has been provided.
|
520 |
+
const DescriptorPool* GetExtensionPool();
|
521 |
+
|
522 |
+
// Get the MessageFactory set via SetExtensionRegistry(), or NULL if no
|
523 |
+
// factory has been provided.
|
524 |
+
MessageFactory* GetExtensionFactory();
|
525 |
+
|
526 |
+
private:
|
527 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedInputStream);
|
528 |
+
|
529 |
+
const uint8* buffer_;
|
530 |
+
const uint8* buffer_end_; // pointer to the end of the buffer.
|
531 |
+
ZeroCopyInputStream* input_;
|
532 |
+
int total_bytes_read_; // total bytes read from input_, including
|
533 |
+
// the current buffer
|
534 |
+
|
535 |
+
// If total_bytes_read_ surpasses INT_MAX, we record the extra bytes here
|
536 |
+
// so that we can BackUp() on destruction.
|
537 |
+
int overflow_bytes_;
|
538 |
+
|
539 |
+
// LastTagWas() stuff.
|
540 |
+
uint32 last_tag_; // result of last ReadTag() or ReadTagWithCutoff().
|
541 |
+
|
542 |
+
// This is set true by ReadTag{Fallback/Slow}() if it is called when exactly
|
543 |
+
// at EOF, or by ExpectAtEnd() when it returns true. This happens when we
|
544 |
+
// reach the end of a message and attempt to read another tag.
|
545 |
+
bool legitimate_message_end_;
|
546 |
+
|
547 |
+
// See EnableAliasing().
|
548 |
+
bool aliasing_enabled_;
|
549 |
+
|
550 |
+
// Limits
|
551 |
+
Limit current_limit_; // if position = -1, no limit is applied
|
552 |
+
|
553 |
+
// For simplicity, if the current buffer crosses a limit (either a normal
|
554 |
+
// limit created by PushLimit() or the total bytes limit), buffer_size_
|
555 |
+
// only tracks the number of bytes before that limit. This field
|
556 |
+
// contains the number of bytes after it. Note that this implies that if
|
557 |
+
// buffer_size_ == 0 and buffer_size_after_limit_ > 0, we know we've
|
558 |
+
// hit a limit. However, if both are zero, it doesn't necessarily mean
|
559 |
+
// we aren't at a limit -- the buffer may have ended exactly at the limit.
|
560 |
+
int buffer_size_after_limit_;
|
561 |
+
|
562 |
+
// Maximum number of bytes to read, period. This is unrelated to
|
563 |
+
// current_limit_. Set using SetTotalBytesLimit().
|
564 |
+
int total_bytes_limit_;
|
565 |
+
|
566 |
+
// Current recursion budget, controlled by IncrementRecursionDepth() and
|
567 |
+
// similar. Starts at recursion_limit_ and goes down: if this reaches
|
568 |
+
// -1 we are over budget.
|
569 |
+
int recursion_budget_;
|
570 |
+
// Recursion depth limit, set by SetRecursionLimit().
|
571 |
+
int recursion_limit_;
|
572 |
+
|
573 |
+
// See SetExtensionRegistry().
|
574 |
+
const DescriptorPool* extension_pool_;
|
575 |
+
MessageFactory* extension_factory_;
|
576 |
+
|
577 |
+
// Private member functions.
|
578 |
+
|
579 |
+
// Fallback when Skip() goes past the end of the current buffer.
|
580 |
+
bool SkipFallback(int count, int original_buffer_size);
|
581 |
+
|
582 |
+
// Advance the buffer by a given number of bytes.
|
583 |
+
void Advance(int amount);
|
584 |
+
|
585 |
+
// Back up input_ to the current buffer position.
|
586 |
+
void BackUpInputToCurrentPosition();
|
587 |
+
|
588 |
+
// Recomputes the value of buffer_size_after_limit_. Must be called after
|
589 |
+
// current_limit_ or total_bytes_limit_ changes.
|
590 |
+
void RecomputeBufferLimits();
|
591 |
+
|
592 |
+
// Writes an error message saying that we hit total_bytes_limit_.
|
593 |
+
void PrintTotalBytesLimitError();
|
594 |
+
|
595 |
+
// Called when the buffer runs out to request more data. Implies an
|
596 |
+
// Advance(BufferSize()).
|
597 |
+
bool Refresh();
|
598 |
+
|
599 |
+
// When parsing varints, we optimize for the common case of small values, and
|
600 |
+
// then optimize for the case when the varint fits within the current buffer
|
601 |
+
// piece. The Fallback method is used when we can't use the one-byte
|
602 |
+
// optimization. The Slow method is yet another fallback when the buffer is
|
603 |
+
// not large enough. Making the slow path out-of-line speeds up the common
|
604 |
+
// case by 10-15%. The slow path is fairly uncommon: it only triggers when a
|
605 |
+
// message crosses multiple buffers. Note: ReadVarint32Fallback() and
|
606 |
+
// ReadVarint64Fallback() are called frequently and generally not inlined, so
|
607 |
+
// they have been optimized to avoid "out" parameters. The former returns -1
|
608 |
+
// if it fails and the uint32 it read otherwise. The latter has a bool
|
609 |
+
// indicating success or failure as part of its return type.
|
610 |
+
int64 ReadVarint32Fallback(uint32 first_byte_or_zero);
|
611 |
+
int ReadVarintSizeAsIntFallback();
|
612 |
+
std::pair<uint64, bool> ReadVarint64Fallback();
|
613 |
+
bool ReadVarint32Slow(uint32* value);
|
614 |
+
bool ReadVarint64Slow(uint64* value);
|
615 |
+
int ReadVarintSizeAsIntSlow();
|
616 |
+
bool ReadLittleEndian32Fallback(uint32* value);
|
617 |
+
bool ReadLittleEndian64Fallback(uint64* value);
|
618 |
+
|
619 |
+
// Fallback/slow methods for reading tags. These do not update last_tag_,
|
620 |
+
// but will set legitimate_message_end_ if we are at the end of the input
|
621 |
+
// stream.
|
622 |
+
uint32 ReadTagFallback(uint32 first_byte_or_zero);
|
623 |
+
uint32 ReadTagSlow();
|
624 |
+
bool ReadStringFallback(string* buffer, int size);
|
625 |
+
|
626 |
+
// Return the size of the buffer.
|
627 |
+
int BufferSize() const;
|
628 |
+
|
629 |
+
static const int kDefaultTotalBytesLimit = INT_MAX;
|
630 |
+
|
631 |
+
static int default_recursion_limit_; // 100 by default.
|
632 |
+
};
|
633 |
+
|
634 |
+
// Class which encodes and writes binary data which is composed of varint-
|
635 |
+
// encoded integers and fixed-width pieces. Wraps a ZeroCopyOutputStream.
|
636 |
+
// Most users will not need to deal with CodedOutputStream.
|
637 |
+
//
|
638 |
+
// Most methods of CodedOutputStream which return a bool return false if an
|
639 |
+
// underlying I/O error occurs. Once such a failure occurs, the
|
640 |
+
// CodedOutputStream is broken and is no longer useful. The Write* methods do
|
641 |
+
// not return the stream status, but will invalidate the stream if an error
|
642 |
+
// occurs. The client can probe HadError() to determine the status.
|
643 |
+
//
|
644 |
+
// Note that every method of CodedOutputStream which writes some data has
|
645 |
+
// a corresponding static "ToArray" version. These versions write directly
|
646 |
+
// to the provided buffer, returning a pointer past the last written byte.
|
647 |
+
// They require that the buffer has sufficient capacity for the encoded data.
|
648 |
+
// This allows an optimization where we check if an output stream has enough
|
649 |
+
// space for an entire message before we start writing and, if there is, we
|
650 |
+
// call only the ToArray methods to avoid doing bound checks for each
|
651 |
+
// individual value.
|
652 |
+
// i.e., in the example above:
|
653 |
+
//
|
654 |
+
// CodedOutputStream coded_output = new CodedOutputStream(raw_output);
|
655 |
+
// int magic_number = 1234;
|
656 |
+
// char text[] = "Hello world!";
|
657 |
+
//
|
658 |
+
// int coded_size = sizeof(magic_number) +
|
659 |
+
// CodedOutputStream::VarintSize32(strlen(text)) +
|
660 |
+
// strlen(text);
|
661 |
+
//
|
662 |
+
// uint8* buffer =
|
663 |
+
// coded_output->GetDirectBufferForNBytesAndAdvance(coded_size);
|
664 |
+
// if (buffer != NULL) {
|
665 |
+
// // The output stream has enough space in the buffer: write directly to
|
666 |
+
// // the array.
|
667 |
+
// buffer = CodedOutputStream::WriteLittleEndian32ToArray(magic_number,
|
668 |
+
// buffer);
|
669 |
+
// buffer = CodedOutputStream::WriteVarint32ToArray(strlen(text), buffer);
|
670 |
+
// buffer = CodedOutputStream::WriteRawToArray(text, strlen(text), buffer);
|
671 |
+
// } else {
|
672 |
+
// // Make bound-checked writes, which will ask the underlying stream for
|
673 |
+
// // more space as needed.
|
674 |
+
// coded_output->WriteLittleEndian32(magic_number);
|
675 |
+
// coded_output->WriteVarint32(strlen(text));
|
676 |
+
// coded_output->WriteRaw(text, strlen(text));
|
677 |
+
// }
|
678 |
+
//
|
679 |
+
// delete coded_output;
|
680 |
+
class LIBPROTOBUF_EXPORT CodedOutputStream {
|
681 |
+
public:
|
682 |
+
// Create an CodedOutputStream that writes to the given ZeroCopyOutputStream.
|
683 |
+
explicit CodedOutputStream(ZeroCopyOutputStream* output);
|
684 |
+
CodedOutputStream(ZeroCopyOutputStream* output, bool do_eager_refresh);
|
685 |
+
|
686 |
+
// Destroy the CodedOutputStream and position the underlying
|
687 |
+
// ZeroCopyOutputStream immediately after the last byte written.
|
688 |
+
~CodedOutputStream();
|
689 |
+
|
690 |
+
// Trims any unused space in the underlying buffer so that its size matches
|
691 |
+
// the number of bytes written by this stream. The underlying buffer will
|
692 |
+
// automatically be trimmed when this stream is destroyed; this call is only
|
693 |
+
// necessary if the underlying buffer is accessed *before* the stream is
|
694 |
+
// destroyed.
|
695 |
+
void Trim();
|
696 |
+
|
697 |
+
// Skips a number of bytes, leaving the bytes unmodified in the underlying
|
698 |
+
// buffer. Returns false if an underlying write error occurs. This is
|
699 |
+
// mainly useful with GetDirectBufferPointer().
|
700 |
+
bool Skip(int count);
|
701 |
+
|
702 |
+
// Sets *data to point directly at the unwritten part of the
|
703 |
+
// CodedOutputStream's underlying buffer, and *size to the size of that
|
704 |
+
// buffer, but does not advance the stream's current position. This will
|
705 |
+
// always either produce a non-empty buffer or return false. If the caller
|
706 |
+
// writes any data to this buffer, it should then call Skip() to skip over
|
707 |
+
// the consumed bytes. This may be useful for implementing external fast
|
708 |
+
// serialization routines for types of data not covered by the
|
709 |
+
// CodedOutputStream interface.
|
710 |
+
bool GetDirectBufferPointer(void** data, int* size);
|
711 |
+
|
712 |
+
// If there are at least "size" bytes available in the current buffer,
|
713 |
+
// returns a pointer directly into the buffer and advances over these bytes.
|
714 |
+
// The caller may then write directly into this buffer (e.g. using the
|
715 |
+
// *ToArray static methods) rather than go through CodedOutputStream. If
|
716 |
+
// there are not enough bytes available, returns NULL. The return pointer is
|
717 |
+
// invalidated as soon as any other non-const method of CodedOutputStream
|
718 |
+
// is called.
|
719 |
+
inline uint8* GetDirectBufferForNBytesAndAdvance(int size);
|
720 |
+
|
721 |
+
// Write raw bytes, copying them from the given buffer.
|
722 |
+
void WriteRaw(const void* buffer, int size);
|
723 |
+
// Like WriteRaw() but will try to write aliased data if aliasing is
|
724 |
+
// turned on.
|
725 |
+
void WriteRawMaybeAliased(const void* data, int size);
|
726 |
+
// Like WriteRaw() but writing directly to the target array.
|
727 |
+
// This is _not_ inlined, as the compiler often optimizes memcpy into inline
|
728 |
+
// copy loops. Since this gets called by every field with string or bytes
|
729 |
+
// type, inlining may lead to a significant amount of code bloat, with only a
|
730 |
+
// minor performance gain.
|
731 |
+
static uint8* WriteRawToArray(const void* buffer, int size, uint8* target);
|
732 |
+
|
733 |
+
// Equivalent to WriteRaw(str.data(), str.size()).
|
734 |
+
void WriteString(const string& str);
|
735 |
+
// Like WriteString() but writing directly to the target array.
|
736 |
+
static uint8* WriteStringToArray(const string& str, uint8* target);
|
737 |
+
// Write the varint-encoded size of str followed by str.
|
738 |
+
static uint8* WriteStringWithSizeToArray(const string& str, uint8* target);
|
739 |
+
|
740 |
+
|
741 |
+
// Instructs the CodedOutputStream to allow the underlying
|
742 |
+
// ZeroCopyOutputStream to hold pointers to the original structure instead of
|
743 |
+
// copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
|
744 |
+
// underlying stream does not support aliasing, then enabling it has no
|
745 |
+
// affect. For now, this only affects the behavior of
|
746 |
+
// WriteRawMaybeAliased().
|
747 |
+
//
|
748 |
+
// NOTE: It is caller's responsibility to ensure that the chunk of memory
|
749 |
+
// remains live until all of the data has been consumed from the stream.
|
750 |
+
void EnableAliasing(bool enabled);
|
751 |
+
|
752 |
+
// Write a 32-bit little-endian integer.
|
753 |
+
void WriteLittleEndian32(uint32 value);
|
754 |
+
// Like WriteLittleEndian32() but writing directly to the target array.
|
755 |
+
static uint8* WriteLittleEndian32ToArray(uint32 value, uint8* target);
|
756 |
+
// Write a 64-bit little-endian integer.
|
757 |
+
void WriteLittleEndian64(uint64 value);
|
758 |
+
// Like WriteLittleEndian64() but writing directly to the target array.
|
759 |
+
static uint8* WriteLittleEndian64ToArray(uint64 value, uint8* target);
|
760 |
+
|
761 |
+
// Write an unsigned integer with Varint encoding. Writing a 32-bit value
|
762 |
+
// is equivalent to casting it to uint64 and writing it as a 64-bit value,
|
763 |
+
// but may be more efficient.
|
764 |
+
void WriteVarint32(uint32 value);
|
765 |
+
// Like WriteVarint32() but writing directly to the target array.
|
766 |
+
static uint8* WriteVarint32ToArray(uint32 value, uint8* target);
|
767 |
+
// Write an unsigned integer with Varint encoding.
|
768 |
+
void WriteVarint64(uint64 value);
|
769 |
+
// Like WriteVarint64() but writing directly to the target array.
|
770 |
+
static uint8* WriteVarint64ToArray(uint64 value, uint8* target);
|
771 |
+
|
772 |
+
// Equivalent to WriteVarint32() except when the value is negative,
|
773 |
+
// in which case it must be sign-extended to a full 10 bytes.
|
774 |
+
void WriteVarint32SignExtended(int32 value);
|
775 |
+
// Like WriteVarint32SignExtended() but writing directly to the target array.
|
776 |
+
static uint8* WriteVarint32SignExtendedToArray(int32 value, uint8* target);
|
777 |
+
|
778 |
+
// This is identical to WriteVarint32(), but optimized for writing tags.
|
779 |
+
// In particular, if the input is a compile-time constant, this method
|
780 |
+
// compiles down to a couple instructions.
|
781 |
+
// Always inline because otherwise the aformentioned optimization can't work,
|
782 |
+
// but GCC by default doesn't want to inline this.
|
783 |
+
void WriteTag(uint32 value);
|
784 |
+
// Like WriteTag() but writing directly to the target array.
|
785 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
|
786 |
+
static uint8* WriteTagToArray(uint32 value, uint8* target);
|
787 |
+
|
788 |
+
// Returns the number of bytes needed to encode the given value as a varint.
|
789 |
+
static size_t VarintSize32(uint32 value);
|
790 |
+
// Returns the number of bytes needed to encode the given value as a varint.
|
791 |
+
static size_t VarintSize64(uint64 value);
|
792 |
+
|
793 |
+
// If negative, 10 bytes. Otheriwse, same as VarintSize32().
|
794 |
+
static size_t VarintSize32SignExtended(int32 value);
|
795 |
+
|
796 |
+
// Compile-time equivalent of VarintSize32().
|
797 |
+
template <uint32 Value>
|
798 |
+
struct StaticVarintSize32 {
|
799 |
+
static const size_t value =
|
800 |
+
(Value < (1 << 7))
|
801 |
+
? 1
|
802 |
+
: (Value < (1 << 14))
|
803 |
+
? 2
|
804 |
+
: (Value < (1 << 21))
|
805 |
+
? 3
|
806 |
+
: (Value < (1 << 28))
|
807 |
+
? 4
|
808 |
+
: 5;
|
809 |
+
};
|
810 |
+
|
811 |
+
// Returns the total number of bytes written since this object was created.
|
812 |
+
inline int ByteCount() const;
|
813 |
+
|
814 |
+
// Returns true if there was an underlying I/O error since this object was
|
815 |
+
// created.
|
816 |
+
bool HadError() const { return had_error_; }
|
817 |
+
|
818 |
+
// Deterministic serialization, if requested, guarantees that for a given
|
819 |
+
// binary, equal messages will always be serialized to the same bytes. This
|
820 |
+
// implies:
|
821 |
+
// . repeated serialization of a message will return the same bytes
|
822 |
+
// . different processes of the same binary (which may be executing on
|
823 |
+
// different machines) will serialize equal messages to the same bytes.
|
824 |
+
//
|
825 |
+
// Note the deterministic serialization is NOT canonical across languages; it
|
826 |
+
// is also unstable across different builds with schema changes due to unknown
|
827 |
+
// fields. Users who need canonical serialization, e.g., persistent storage in
|
828 |
+
// a canonical form, fingerprinting, etc., should define their own
|
829 |
+
// canonicalization specification and implement the serializer using
|
830 |
+
// reflection APIs rather than relying on this API.
|
831 |
+
//
|
832 |
+
// If deterministic serialization is requested, the serializer will
|
833 |
+
// sort map entries by keys in lexicographical order or numerical order.
|
834 |
+
// (This is an implementation detail and may subject to change.)
|
835 |
+
//
|
836 |
+
// There are two ways to determine whether serialization should be
|
837 |
+
// deterministic for this CodedOutputStream. If SetSerializationDeterministic
|
838 |
+
// has not yet been called, then the default comes from the global default,
|
839 |
+
// which is false, until SetDefaultSerializationDeterministic has been called.
|
840 |
+
// Otherwise, SetSerializationDeterministic has been called, and the last
|
841 |
+
// value passed to it is all that matters.
|
842 |
+
void SetSerializationDeterministic(bool value) {
|
843 |
+
is_serialization_deterministic_ = value;
|
844 |
+
}
|
845 |
+
// See above. Also, note that users of this CodedOutputStream may need to
|
846 |
+
// call IsSerializationDeterministic() to serialize in the intended way. This
|
847 |
+
// CodedOutputStream cannot enforce a desire for deterministic serialization
|
848 |
+
// by itself.
|
849 |
+
bool IsSerializationDeterministic() const {
|
850 |
+
return is_serialization_deterministic_;
|
851 |
+
}
|
852 |
+
|
853 |
+
static bool IsDefaultSerializationDeterministic() {
|
854 |
+
return default_serialization_deterministic_.load(std::memory_order_relaxed) != 0;
|
855 |
+
}
|
856 |
+
|
857 |
+
private:
|
858 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedOutputStream);
|
859 |
+
|
860 |
+
ZeroCopyOutputStream* output_;
|
861 |
+
uint8* buffer_;
|
862 |
+
int buffer_size_;
|
863 |
+
int total_bytes_; // Sum of sizes of all buffers seen so far.
|
864 |
+
bool had_error_; // Whether an error occurred during output.
|
865 |
+
bool aliasing_enabled_; // See EnableAliasing().
|
866 |
+
bool is_serialization_deterministic_;
|
867 |
+
static std::atomic<bool> default_serialization_deterministic_;
|
868 |
+
|
869 |
+
// Advance the buffer by a given number of bytes.
|
870 |
+
void Advance(int amount);
|
871 |
+
|
872 |
+
// Called when the buffer runs out to request more data. Implies an
|
873 |
+
// Advance(buffer_size_).
|
874 |
+
bool Refresh();
|
875 |
+
|
876 |
+
// Like WriteRaw() but may avoid copying if the underlying
|
877 |
+
// ZeroCopyOutputStream supports it.
|
878 |
+
void WriteAliasedRaw(const void* buffer, int size);
|
879 |
+
|
880 |
+
// If this write might cross the end of the buffer, we compose the bytes first
|
881 |
+
// then use WriteRaw().
|
882 |
+
void WriteVarint32SlowPath(uint32 value);
|
883 |
+
void WriteVarint64SlowPath(uint64 value);
|
884 |
+
|
885 |
+
// See above. Other projects may use "friend" to allow them to call this.
|
886 |
+
// After SetDefaultSerializationDeterministic() completes, all protocol
|
887 |
+
// buffer serializations will be deterministic by default. Thread safe.
|
888 |
+
// However, the meaning of "after" is subtle here: to be safe, each thread
|
889 |
+
// that wants deterministic serialization by default needs to call
|
890 |
+
// SetDefaultSerializationDeterministic() or ensure on its own that another
|
891 |
+
// thread has done so.
|
892 |
+
friend void ::google::protobuf::internal::MapTestForceDeterministic();
|
893 |
+
static void SetDefaultSerializationDeterministic() {
|
894 |
+
default_serialization_deterministic_.store(true, std::memory_order_relaxed);
|
895 |
+
}
|
896 |
+
};
|
897 |
+
|
898 |
+
// inline methods ====================================================
|
899 |
+
// The vast majority of varints are only one byte. These inline
|
900 |
+
// methods optimize for that case.
|
901 |
+
|
902 |
+
inline bool CodedInputStream::ReadVarint32(uint32* value) {
|
903 |
+
uint32 v = 0;
|
904 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
|
905 |
+
v = *buffer_;
|
906 |
+
if (v < 0x80) {
|
907 |
+
*value = v;
|
908 |
+
Advance(1);
|
909 |
+
return true;
|
910 |
+
}
|
911 |
+
}
|
912 |
+
int64 result = ReadVarint32Fallback(v);
|
913 |
+
*value = static_cast<uint32>(result);
|
914 |
+
return result >= 0;
|
915 |
+
}
|
916 |
+
|
917 |
+
inline bool CodedInputStream::ReadVarint64(uint64* value) {
|
918 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && *buffer_ < 0x80) {
|
919 |
+
*value = *buffer_;
|
920 |
+
Advance(1);
|
921 |
+
return true;
|
922 |
+
}
|
923 |
+
std::pair<uint64, bool> p = ReadVarint64Fallback();
|
924 |
+
*value = p.first;
|
925 |
+
return p.second;
|
926 |
+
}
|
927 |
+
|
928 |
+
inline bool CodedInputStream::ReadVarintSizeAsInt(int* value) {
|
929 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
|
930 |
+
int v = *buffer_;
|
931 |
+
if (v < 0x80) {
|
932 |
+
*value = v;
|
933 |
+
Advance(1);
|
934 |
+
return true;
|
935 |
+
}
|
936 |
+
}
|
937 |
+
*value = ReadVarintSizeAsIntFallback();
|
938 |
+
return *value >= 0;
|
939 |
+
}
|
940 |
+
|
941 |
+
// static
|
942 |
+
inline const uint8* CodedInputStream::ReadLittleEndian32FromArray(
|
943 |
+
const uint8* buffer,
|
944 |
+
uint32* value) {
|
945 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
946 |
+
memcpy(value, buffer, sizeof(*value));
|
947 |
+
return buffer + sizeof(*value);
|
948 |
+
#else
|
949 |
+
*value = (static_cast<uint32>(buffer[0]) ) |
|
950 |
+
(static_cast<uint32>(buffer[1]) << 8) |
|
951 |
+
(static_cast<uint32>(buffer[2]) << 16) |
|
952 |
+
(static_cast<uint32>(buffer[3]) << 24);
|
953 |
+
return buffer + sizeof(*value);
|
954 |
+
#endif
|
955 |
+
}
|
956 |
+
// static
|
957 |
+
inline const uint8* CodedInputStream::ReadLittleEndian64FromArray(
|
958 |
+
const uint8* buffer,
|
959 |
+
uint64* value) {
|
960 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
961 |
+
memcpy(value, buffer, sizeof(*value));
|
962 |
+
return buffer + sizeof(*value);
|
963 |
+
#else
|
964 |
+
uint32 part0 = (static_cast<uint32>(buffer[0]) ) |
|
965 |
+
(static_cast<uint32>(buffer[1]) << 8) |
|
966 |
+
(static_cast<uint32>(buffer[2]) << 16) |
|
967 |
+
(static_cast<uint32>(buffer[3]) << 24);
|
968 |
+
uint32 part1 = (static_cast<uint32>(buffer[4]) ) |
|
969 |
+
(static_cast<uint32>(buffer[5]) << 8) |
|
970 |
+
(static_cast<uint32>(buffer[6]) << 16) |
|
971 |
+
(static_cast<uint32>(buffer[7]) << 24);
|
972 |
+
*value = static_cast<uint64>(part0) |
|
973 |
+
(static_cast<uint64>(part1) << 32);
|
974 |
+
return buffer + sizeof(*value);
|
975 |
+
#endif
|
976 |
+
}
|
977 |
+
|
978 |
+
inline bool CodedInputStream::ReadLittleEndian32(uint32* value) {
|
979 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
980 |
+
if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
|
981 |
+
buffer_ = ReadLittleEndian32FromArray(buffer_, value);
|
982 |
+
return true;
|
983 |
+
} else {
|
984 |
+
return ReadLittleEndian32Fallback(value);
|
985 |
+
}
|
986 |
+
#else
|
987 |
+
return ReadLittleEndian32Fallback(value);
|
988 |
+
#endif
|
989 |
+
}
|
990 |
+
|
991 |
+
inline bool CodedInputStream::ReadLittleEndian64(uint64* value) {
|
992 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
993 |
+
if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
|
994 |
+
buffer_ = ReadLittleEndian64FromArray(buffer_, value);
|
995 |
+
return true;
|
996 |
+
} else {
|
997 |
+
return ReadLittleEndian64Fallback(value);
|
998 |
+
}
|
999 |
+
#else
|
1000 |
+
return ReadLittleEndian64Fallback(value);
|
1001 |
+
#endif
|
1002 |
+
}
|
1003 |
+
|
1004 |
+
inline uint32 CodedInputStream::ReadTagNoLastTag() {
|
1005 |
+
uint32 v = 0;
|
1006 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
|
1007 |
+
v = *buffer_;
|
1008 |
+
if (v < 0x80) {
|
1009 |
+
Advance(1);
|
1010 |
+
return v;
|
1011 |
+
}
|
1012 |
+
}
|
1013 |
+
v = ReadTagFallback(v);
|
1014 |
+
return v;
|
1015 |
+
}
|
1016 |
+
|
1017 |
+
inline std::pair<uint32, bool> CodedInputStream::ReadTagWithCutoffNoLastTag(
|
1018 |
+
uint32 cutoff) {
|
1019 |
+
// In performance-sensitive code we can expect cutoff to be a compile-time
|
1020 |
+
// constant, and things like "cutoff >= kMax1ByteVarint" to be evaluated at
|
1021 |
+
// compile time.
|
1022 |
+
uint32 first_byte_or_zero = 0;
|
1023 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {
|
1024 |
+
// Hot case: buffer_ non_empty, buffer_[0] in [1, 128).
|
1025 |
+
// TODO(gpike): Is it worth rearranging this? E.g., if the number of fields
|
1026 |
+
// is large enough then is it better to check for the two-byte case first?
|
1027 |
+
first_byte_or_zero = buffer_[0];
|
1028 |
+
if (static_cast<int8>(buffer_[0]) > 0) {
|
1029 |
+
const uint32 kMax1ByteVarint = 0x7f;
|
1030 |
+
uint32 tag = buffer_[0];
|
1031 |
+
Advance(1);
|
1032 |
+
return std::make_pair(tag, cutoff >= kMax1ByteVarint || tag <= cutoff);
|
1033 |
+
}
|
1034 |
+
// Other hot case: cutoff >= 0x80, buffer_ has at least two bytes available,
|
1035 |
+
// and tag is two bytes. The latter is tested by bitwise-and-not of the
|
1036 |
+
// first byte and the second byte.
|
1037 |
+
if (cutoff >= 0x80 && GOOGLE_PREDICT_TRUE(buffer_ + 1 < buffer_end_) &&
|
1038 |
+
GOOGLE_PREDICT_TRUE((buffer_[0] & ~buffer_[1]) >= 0x80)) {
|
1039 |
+
const uint32 kMax2ByteVarint = (0x7f << 7) + 0x7f;
|
1040 |
+
uint32 tag = (1u << 7) * buffer_[1] + (buffer_[0] - 0x80);
|
1041 |
+
Advance(2);
|
1042 |
+
// It might make sense to test for tag == 0 now, but it is so rare that
|
1043 |
+
// that we don't bother. A varint-encoded 0 should be one byte unless
|
1044 |
+
// the encoder lost its mind. The second part of the return value of
|
1045 |
+
// this function is allowed to be either true or false if the tag is 0,
|
1046 |
+
// so we don't have to check for tag == 0. We may need to check whether
|
1047 |
+
// it exceeds cutoff.
|
1048 |
+
bool at_or_below_cutoff = cutoff >= kMax2ByteVarint || tag <= cutoff;
|
1049 |
+
return std::make_pair(tag, at_or_below_cutoff);
|
1050 |
+
}
|
1051 |
+
}
|
1052 |
+
// Slow path
|
1053 |
+
const uint32 tag = ReadTagFallback(first_byte_or_zero);
|
1054 |
+
return std::make_pair(tag, static_cast<uint32>(tag - 1) < cutoff);
|
1055 |
+
}
|
1056 |
+
|
1057 |
+
inline bool CodedInputStream::LastTagWas(uint32 expected) {
|
1058 |
+
return last_tag_ == expected;
|
1059 |
+
}
|
1060 |
+
|
1061 |
+
inline bool CodedInputStream::ConsumedEntireMessage() {
|
1062 |
+
return legitimate_message_end_;
|
1063 |
+
}
|
1064 |
+
|
1065 |
+
inline bool CodedInputStream::ExpectTag(uint32 expected) {
|
1066 |
+
if (expected < (1 << 7)) {
|
1067 |
+
if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && buffer_[0] == expected) {
|
1068 |
+
Advance(1);
|
1069 |
+
return true;
|
1070 |
+
} else {
|
1071 |
+
return false;
|
1072 |
+
}
|
1073 |
+
} else if (expected < (1 << 14)) {
|
1074 |
+
if (GOOGLE_PREDICT_TRUE(BufferSize() >= 2) &&
|
1075 |
+
buffer_[0] == static_cast<uint8>(expected | 0x80) &&
|
1076 |
+
buffer_[1] == static_cast<uint8>(expected >> 7)) {
|
1077 |
+
Advance(2);
|
1078 |
+
return true;
|
1079 |
+
} else {
|
1080 |
+
return false;
|
1081 |
+
}
|
1082 |
+
} else {
|
1083 |
+
// Don't bother optimizing for larger values.
|
1084 |
+
return false;
|
1085 |
+
}
|
1086 |
+
}
|
1087 |
+
|
1088 |
+
inline const uint8* CodedInputStream::ExpectTagFromArray(
|
1089 |
+
const uint8* buffer, uint32 expected) {
|
1090 |
+
if (expected < (1 << 7)) {
|
1091 |
+
if (buffer[0] == expected) {
|
1092 |
+
return buffer + 1;
|
1093 |
+
}
|
1094 |
+
} else if (expected < (1 << 14)) {
|
1095 |
+
if (buffer[0] == static_cast<uint8>(expected | 0x80) &&
|
1096 |
+
buffer[1] == static_cast<uint8>(expected >> 7)) {
|
1097 |
+
return buffer + 2;
|
1098 |
+
}
|
1099 |
+
}
|
1100 |
+
return NULL;
|
1101 |
+
}
|
1102 |
+
|
1103 |
+
inline void CodedInputStream::GetDirectBufferPointerInline(const void** data,
|
1104 |
+
int* size) {
|
1105 |
+
*data = buffer_;
|
1106 |
+
*size = static_cast<int>(buffer_end_ - buffer_);
|
1107 |
+
}
|
1108 |
+
|
1109 |
+
inline bool CodedInputStream::ExpectAtEnd() {
|
1110 |
+
// If we are at a limit we know no more bytes can be read. Otherwise, it's
|
1111 |
+
// hard to say without calling Refresh(), and we'd rather not do that.
|
1112 |
+
|
1113 |
+
if (buffer_ == buffer_end_ &&
|
1114 |
+
((buffer_size_after_limit_ != 0) ||
|
1115 |
+
(total_bytes_read_ == current_limit_))) {
|
1116 |
+
last_tag_ = 0; // Pretend we called ReadTag()...
|
1117 |
+
legitimate_message_end_ = true; // ... and it hit EOF.
|
1118 |
+
return true;
|
1119 |
+
} else {
|
1120 |
+
return false;
|
1121 |
+
}
|
1122 |
+
}
|
1123 |
+
|
1124 |
+
inline int CodedInputStream::CurrentPosition() const {
|
1125 |
+
return total_bytes_read_ - (BufferSize() + buffer_size_after_limit_);
|
1126 |
+
}
|
1127 |
+
|
1128 |
+
inline uint8* CodedOutputStream::GetDirectBufferForNBytesAndAdvance(int size) {
|
1129 |
+
if (buffer_size_ < size) {
|
1130 |
+
return NULL;
|
1131 |
+
} else {
|
1132 |
+
uint8* result = buffer_;
|
1133 |
+
Advance(size);
|
1134 |
+
return result;
|
1135 |
+
}
|
1136 |
+
}
|
1137 |
+
|
1138 |
+
inline uint8* CodedOutputStream::WriteVarint32ToArray(uint32 value,
|
1139 |
+
uint8* target) {
|
1140 |
+
while (value >= 0x80) {
|
1141 |
+
*target = static_cast<uint8>(value | 0x80);
|
1142 |
+
value >>= 7;
|
1143 |
+
++target;
|
1144 |
+
}
|
1145 |
+
*target = static_cast<uint8>(value);
|
1146 |
+
return target + 1;
|
1147 |
+
}
|
1148 |
+
|
1149 |
+
inline uint8* CodedOutputStream::WriteVarint64ToArray(uint64 value,
|
1150 |
+
uint8* target) {
|
1151 |
+
while (value >= 0x80) {
|
1152 |
+
*target = static_cast<uint8>(value | 0x80);
|
1153 |
+
value >>= 7;
|
1154 |
+
++target;
|
1155 |
+
}
|
1156 |
+
*target = static_cast<uint8>(value);
|
1157 |
+
return target + 1;
|
1158 |
+
}
|
1159 |
+
|
1160 |
+
inline void CodedOutputStream::WriteVarint32SignExtended(int32 value) {
|
1161 |
+
WriteVarint64(static_cast<uint64>(value));
|
1162 |
+
}
|
1163 |
+
|
1164 |
+
inline uint8* CodedOutputStream::WriteVarint32SignExtendedToArray(
|
1165 |
+
int32 value, uint8* target) {
|
1166 |
+
return WriteVarint64ToArray(static_cast<uint64>(value), target);
|
1167 |
+
}
|
1168 |
+
|
1169 |
+
inline uint8* CodedOutputStream::WriteLittleEndian32ToArray(uint32 value,
|
1170 |
+
uint8* target) {
|
1171 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
1172 |
+
memcpy(target, &value, sizeof(value));
|
1173 |
+
#else
|
1174 |
+
target[0] = static_cast<uint8>(value);
|
1175 |
+
target[1] = static_cast<uint8>(value >> 8);
|
1176 |
+
target[2] = static_cast<uint8>(value >> 16);
|
1177 |
+
target[3] = static_cast<uint8>(value >> 24);
|
1178 |
+
#endif
|
1179 |
+
return target + sizeof(value);
|
1180 |
+
}
|
1181 |
+
|
1182 |
+
inline uint8* CodedOutputStream::WriteLittleEndian64ToArray(uint64 value,
|
1183 |
+
uint8* target) {
|
1184 |
+
#if defined(PROTOBUF_LITTLE_ENDIAN)
|
1185 |
+
memcpy(target, &value, sizeof(value));
|
1186 |
+
#else
|
1187 |
+
uint32 part0 = static_cast<uint32>(value);
|
1188 |
+
uint32 part1 = static_cast<uint32>(value >> 32);
|
1189 |
+
|
1190 |
+
target[0] = static_cast<uint8>(part0);
|
1191 |
+
target[1] = static_cast<uint8>(part0 >> 8);
|
1192 |
+
target[2] = static_cast<uint8>(part0 >> 16);
|
1193 |
+
target[3] = static_cast<uint8>(part0 >> 24);
|
1194 |
+
target[4] = static_cast<uint8>(part1);
|
1195 |
+
target[5] = static_cast<uint8>(part1 >> 8);
|
1196 |
+
target[6] = static_cast<uint8>(part1 >> 16);
|
1197 |
+
target[7] = static_cast<uint8>(part1 >> 24);
|
1198 |
+
#endif
|
1199 |
+
return target + sizeof(value);
|
1200 |
+
}
|
1201 |
+
|
1202 |
+
inline void CodedOutputStream::WriteVarint32(uint32 value) {
|
1203 |
+
if (buffer_size_ >= 5) {
|
1204 |
+
// Fast path: We have enough bytes left in the buffer to guarantee that
|
1205 |
+
// this write won't cross the end, so we can skip the checks.
|
1206 |
+
uint8* target = buffer_;
|
1207 |
+
uint8* end = WriteVarint32ToArray(value, target);
|
1208 |
+
int size = static_cast<int>(end - target);
|
1209 |
+
Advance(size);
|
1210 |
+
} else {
|
1211 |
+
WriteVarint32SlowPath(value);
|
1212 |
+
}
|
1213 |
+
}
|
1214 |
+
|
1215 |
+
inline void CodedOutputStream::WriteVarint64(uint64 value) {
|
1216 |
+
if (buffer_size_ >= 10) {
|
1217 |
+
// Fast path: We have enough bytes left in the buffer to guarantee that
|
1218 |
+
// this write won't cross the end, so we can skip the checks.
|
1219 |
+
uint8* target = buffer_;
|
1220 |
+
uint8* end = WriteVarint64ToArray(value, target);
|
1221 |
+
int size = static_cast<int>(end - target);
|
1222 |
+
Advance(size);
|
1223 |
+
} else {
|
1224 |
+
WriteVarint64SlowPath(value);
|
1225 |
+
}
|
1226 |
+
}
|
1227 |
+
|
1228 |
+
inline void CodedOutputStream::WriteTag(uint32 value) {
|
1229 |
+
WriteVarint32(value);
|
1230 |
+
}
|
1231 |
+
|
1232 |
+
inline uint8* CodedOutputStream::WriteTagToArray(
|
1233 |
+
uint32 value, uint8* target) {
|
1234 |
+
return WriteVarint32ToArray(value, target);
|
1235 |
+
}
|
1236 |
+
|
1237 |
+
inline size_t CodedOutputStream::VarintSize32(uint32 value) {
|
1238 |
+
// This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
|
1239 |
+
// Use an explicit multiplication to implement the divide of
|
1240 |
+
// a number in the 1..31 range.
|
1241 |
+
// Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
|
1242 |
+
// undefined.
|
1243 |
+
uint32 log2value = Bits::Log2FloorNonZero(value | 0x1);
|
1244 |
+
return static_cast<size_t>((log2value * 9 + 73) / 64);
|
1245 |
+
}
|
1246 |
+
|
1247 |
+
inline size_t CodedOutputStream::VarintSize64(uint64 value) {
|
1248 |
+
// This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
|
1249 |
+
// Use an explicit multiplication to implement the divide of
|
1250 |
+
// a number in the 1..63 range.
|
1251 |
+
// Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
|
1252 |
+
// undefined.
|
1253 |
+
uint32 log2value = Bits::Log2FloorNonZero64(value | 0x1);
|
1254 |
+
return static_cast<size_t>((log2value * 9 + 73) / 64);
|
1255 |
+
}
|
1256 |
+
|
1257 |
+
inline size_t CodedOutputStream::VarintSize32SignExtended(int32 value) {
|
1258 |
+
if (value < 0) {
|
1259 |
+
return 10; // TODO(kenton): Make this a symbolic constant.
|
1260 |
+
} else {
|
1261 |
+
return VarintSize32(static_cast<uint32>(value));
|
1262 |
+
}
|
1263 |
+
}
|
1264 |
+
|
1265 |
+
inline void CodedOutputStream::WriteString(const string& str) {
|
1266 |
+
WriteRaw(str.data(), static_cast<int>(str.size()));
|
1267 |
+
}
|
1268 |
+
|
1269 |
+
inline void CodedOutputStream::WriteRawMaybeAliased(
|
1270 |
+
const void* data, int size) {
|
1271 |
+
if (aliasing_enabled_) {
|
1272 |
+
WriteAliasedRaw(data, size);
|
1273 |
+
} else {
|
1274 |
+
WriteRaw(data, size);
|
1275 |
+
}
|
1276 |
+
}
|
1277 |
+
|
1278 |
+
inline uint8* CodedOutputStream::WriteStringToArray(
|
1279 |
+
const string& str, uint8* target) {
|
1280 |
+
return WriteRawToArray(str.data(), static_cast<int>(str.size()), target);
|
1281 |
+
}
|
1282 |
+
|
1283 |
+
inline int CodedOutputStream::ByteCount() const {
|
1284 |
+
return total_bytes_ - buffer_size_;
|
1285 |
+
}
|
1286 |
+
|
1287 |
+
inline void CodedInputStream::Advance(int amount) {
|
1288 |
+
buffer_ += amount;
|
1289 |
+
}
|
1290 |
+
|
1291 |
+
inline void CodedOutputStream::Advance(int amount) {
|
1292 |
+
buffer_ += amount;
|
1293 |
+
buffer_size_ -= amount;
|
1294 |
+
}
|
1295 |
+
|
1296 |
+
inline void CodedInputStream::SetRecursionLimit(int limit) {
|
1297 |
+
recursion_budget_ += limit - recursion_limit_;
|
1298 |
+
recursion_limit_ = limit;
|
1299 |
+
}
|
1300 |
+
|
1301 |
+
inline bool CodedInputStream::IncrementRecursionDepth() {
|
1302 |
+
--recursion_budget_;
|
1303 |
+
return recursion_budget_ >= 0;
|
1304 |
+
}
|
1305 |
+
|
1306 |
+
inline void CodedInputStream::DecrementRecursionDepth() {
|
1307 |
+
if (recursion_budget_ < recursion_limit_) ++recursion_budget_;
|
1308 |
+
}
|
1309 |
+
|
1310 |
+
inline void CodedInputStream::UnsafeDecrementRecursionDepth() {
|
1311 |
+
assert(recursion_budget_ < recursion_limit_);
|
1312 |
+
++recursion_budget_;
|
1313 |
+
}
|
1314 |
+
|
1315 |
+
inline void CodedInputStream::SetExtensionRegistry(const DescriptorPool* pool,
|
1316 |
+
MessageFactory* factory) {
|
1317 |
+
extension_pool_ = pool;
|
1318 |
+
extension_factory_ = factory;
|
1319 |
+
}
|
1320 |
+
|
1321 |
+
inline const DescriptorPool* CodedInputStream::GetExtensionPool() {
|
1322 |
+
return extension_pool_;
|
1323 |
+
}
|
1324 |
+
|
1325 |
+
inline MessageFactory* CodedInputStream::GetExtensionFactory() {
|
1326 |
+
return extension_factory_;
|
1327 |
+
}
|
1328 |
+
|
1329 |
+
inline int CodedInputStream::BufferSize() const {
|
1330 |
+
return static_cast<int>(buffer_end_ - buffer_);
|
1331 |
+
}
|
1332 |
+
|
1333 |
+
inline CodedInputStream::CodedInputStream(ZeroCopyInputStream* input)
|
1334 |
+
: buffer_(NULL),
|
1335 |
+
buffer_end_(NULL),
|
1336 |
+
input_(input),
|
1337 |
+
total_bytes_read_(0),
|
1338 |
+
overflow_bytes_(0),
|
1339 |
+
last_tag_(0),
|
1340 |
+
legitimate_message_end_(false),
|
1341 |
+
aliasing_enabled_(false),
|
1342 |
+
current_limit_(kint32max),
|
1343 |
+
buffer_size_after_limit_(0),
|
1344 |
+
total_bytes_limit_(kDefaultTotalBytesLimit),
|
1345 |
+
recursion_budget_(default_recursion_limit_),
|
1346 |
+
recursion_limit_(default_recursion_limit_),
|
1347 |
+
extension_pool_(NULL),
|
1348 |
+
extension_factory_(NULL) {
|
1349 |
+
// Eagerly Refresh() so buffer space is immediately available.
|
1350 |
+
Refresh();
|
1351 |
+
}
|
1352 |
+
|
1353 |
+
inline CodedInputStream::CodedInputStream(const uint8* buffer, int size)
|
1354 |
+
: buffer_(buffer),
|
1355 |
+
buffer_end_(buffer + size),
|
1356 |
+
input_(NULL),
|
1357 |
+
total_bytes_read_(size),
|
1358 |
+
overflow_bytes_(0),
|
1359 |
+
last_tag_(0),
|
1360 |
+
legitimate_message_end_(false),
|
1361 |
+
aliasing_enabled_(false),
|
1362 |
+
current_limit_(size),
|
1363 |
+
buffer_size_after_limit_(0),
|
1364 |
+
total_bytes_limit_(kDefaultTotalBytesLimit),
|
1365 |
+
recursion_budget_(default_recursion_limit_),
|
1366 |
+
recursion_limit_(default_recursion_limit_),
|
1367 |
+
extension_pool_(NULL),
|
1368 |
+
extension_factory_(NULL) {
|
1369 |
+
// Note that setting current_limit_ == size is important to prevent some
|
1370 |
+
// code paths from trying to access input_ and segfaulting.
|
1371 |
+
}
|
1372 |
+
|
1373 |
+
inline bool CodedInputStream::IsFlat() const {
|
1374 |
+
return input_ == NULL;
|
1375 |
+
}
|
1376 |
+
|
1377 |
+
inline bool CodedInputStream::Skip(int count) {
|
1378 |
+
if (count < 0) return false; // security: count is often user-supplied
|
1379 |
+
|
1380 |
+
const int original_buffer_size = BufferSize();
|
1381 |
+
|
1382 |
+
if (count <= original_buffer_size) {
|
1383 |
+
// Just skipping within the current buffer. Easy.
|
1384 |
+
Advance(count);
|
1385 |
+
return true;
|
1386 |
+
}
|
1387 |
+
|
1388 |
+
return SkipFallback(count, original_buffer_size);
|
1389 |
+
}
|
1390 |
+
|
1391 |
+
} // namespace io
|
1392 |
+
} // namespace protobuf
|
1393 |
+
|
1394 |
+
|
1395 |
+
#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
|
1396 |
+
#pragma runtime_checks("c", restore)
|
1397 |
+
#endif // _MSC_VER && !defined(__INTEL_COMPILER)
|
1398 |
+
|
1399 |
+
} // namespace google
|
1400 |
+
#endif // GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/coded_stream_inl.h
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Jason Hsueh)
|
32 |
+
//
|
33 |
+
// Implements methods of coded_stream.h that need to be inlined for performance
|
34 |
+
// reasons, but should not be defined in a public header.
|
35 |
+
|
36 |
+
#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
|
37 |
+
#define GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
|
38 |
+
|
39 |
+
#include <google/protobuf/stubs/logging.h>
|
40 |
+
#include <google/protobuf/stubs/common.h>
|
41 |
+
#include <google/protobuf/io/coded_stream.h>
|
42 |
+
#include <google/protobuf/io/zero_copy_stream_impl_lite.h>
|
43 |
+
#include <string>
|
44 |
+
#include <google/protobuf/stubs/stl_util.h>
|
45 |
+
|
46 |
+
namespace google {
|
47 |
+
namespace protobuf {
|
48 |
+
namespace io {
|
49 |
+
|
50 |
+
inline bool CodedInputStream::InternalReadStringInline(string* buffer,
|
51 |
+
int size) {
|
52 |
+
if (size < 0) return false; // security: size is often user-supplied
|
53 |
+
|
54 |
+
if (BufferSize() >= size) {
|
55 |
+
STLStringResizeUninitialized(buffer, size);
|
56 |
+
std::pair<char*, bool> z = as_string_data(buffer);
|
57 |
+
if (z.second) {
|
58 |
+
// Oddly enough, memcpy() requires its first two args to be non-NULL even
|
59 |
+
// if we copy 0 bytes. So, we have ensured that z.first is non-NULL here.
|
60 |
+
GOOGLE_DCHECK(z.first != NULL);
|
61 |
+
memcpy(z.first, buffer_, size);
|
62 |
+
Advance(size);
|
63 |
+
}
|
64 |
+
return true;
|
65 |
+
}
|
66 |
+
|
67 |
+
return ReadStringFallback(buffer, size);
|
68 |
+
}
|
69 |
+
|
70 |
+
inline bool CodedInputStream::InternalReadRawInline(void* buffer, int size) {
|
71 |
+
int current_buffer_size;
|
72 |
+
while ((current_buffer_size = BufferSize()) < size) {
|
73 |
+
// Reading past end of buffer. Copy what we have, then refresh.
|
74 |
+
memcpy(buffer, buffer_, current_buffer_size);
|
75 |
+
buffer = reinterpret_cast<uint8*>(buffer) + current_buffer_size;
|
76 |
+
size -= current_buffer_size;
|
77 |
+
Advance(current_buffer_size);
|
78 |
+
if (!Refresh()) return false;
|
79 |
+
}
|
80 |
+
|
81 |
+
memcpy(buffer, buffer_, size);
|
82 |
+
Advance(size);
|
83 |
+
|
84 |
+
return true;
|
85 |
+
}
|
86 |
+
|
87 |
+
} // namespace io
|
88 |
+
} // namespace protobuf
|
89 |
+
} // namespace google
|
90 |
+
#endif // GOOGLE_PROTOBUF_IO_CODED_STREAM_INL_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream.h
ADDED
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This file contains the ZeroCopyInputStream and ZeroCopyOutputStream
|
36 |
+
// interfaces, which represent abstract I/O streams to and from which
|
37 |
+
// protocol buffers can be read and written. For a few simple
|
38 |
+
// implementations of these interfaces, see zero_copy_stream_impl.h.
|
39 |
+
//
|
40 |
+
// These interfaces are different from classic I/O streams in that they
|
41 |
+
// try to minimize the amount of data copying that needs to be done.
|
42 |
+
// To accomplish this, responsibility for allocating buffers is moved to
|
43 |
+
// the stream object, rather than being the responsibility of the caller.
|
44 |
+
// So, the stream can return a buffer which actually points directly into
|
45 |
+
// the final data structure where the bytes are to be stored, and the caller
|
46 |
+
// can interact directly with that buffer, eliminating an intermediate copy
|
47 |
+
// operation.
|
48 |
+
//
|
49 |
+
// As an example, consider the common case in which you are reading bytes
|
50 |
+
// from an array that is already in memory (or perhaps an mmap()ed file).
|
51 |
+
// With classic I/O streams, you would do something like:
|
52 |
+
// char buffer[BUFFER_SIZE];
|
53 |
+
// input->Read(buffer, BUFFER_SIZE);
|
54 |
+
// DoSomething(buffer, BUFFER_SIZE);
|
55 |
+
// Then, the stream basically just calls memcpy() to copy the data from
|
56 |
+
// the array into your buffer. With a ZeroCopyInputStream, you would do
|
57 |
+
// this instead:
|
58 |
+
// const void* buffer;
|
59 |
+
// int size;
|
60 |
+
// input->Next(&buffer, &size);
|
61 |
+
// DoSomething(buffer, size);
|
62 |
+
// Here, no copy is performed. The input stream returns a pointer directly
|
63 |
+
// into the backing array, and the caller ends up reading directly from it.
|
64 |
+
//
|
65 |
+
// If you want to be able to read the old-fashion way, you can create
|
66 |
+
// a CodedInputStream or CodedOutputStream wrapping these objects and use
|
67 |
+
// their ReadRaw()/WriteRaw() methods. These will, of course, add a copy
|
68 |
+
// step, but Coded*Stream will handle buffering so at least it will be
|
69 |
+
// reasonably efficient.
|
70 |
+
//
|
71 |
+
// ZeroCopyInputStream example:
|
72 |
+
// // Read in a file and print its contents to stdout.
|
73 |
+
// int fd = open("myfile", O_RDONLY);
|
74 |
+
// ZeroCopyInputStream* input = new FileInputStream(fd);
|
75 |
+
//
|
76 |
+
// const void* buffer;
|
77 |
+
// int size;
|
78 |
+
// while (input->Next(&buffer, &size)) {
|
79 |
+
// cout.write(buffer, size);
|
80 |
+
// }
|
81 |
+
//
|
82 |
+
// delete input;
|
83 |
+
// close(fd);
|
84 |
+
//
|
85 |
+
// ZeroCopyOutputStream example:
|
86 |
+
// // Copy the contents of "infile" to "outfile", using plain read() for
|
87 |
+
// // "infile" but a ZeroCopyOutputStream for "outfile".
|
88 |
+
// int infd = open("infile", O_RDONLY);
|
89 |
+
// int outfd = open("outfile", O_WRONLY);
|
90 |
+
// ZeroCopyOutputStream* output = new FileOutputStream(outfd);
|
91 |
+
//
|
92 |
+
// void* buffer;
|
93 |
+
// int size;
|
94 |
+
// while (output->Next(&buffer, &size)) {
|
95 |
+
// int bytes = read(infd, buffer, size);
|
96 |
+
// if (bytes < size) {
|
97 |
+
// // Reached EOF.
|
98 |
+
// output->BackUp(size - bytes);
|
99 |
+
// break;
|
100 |
+
// }
|
101 |
+
// }
|
102 |
+
//
|
103 |
+
// delete output;
|
104 |
+
// close(infd);
|
105 |
+
// close(outfd);
|
106 |
+
|
107 |
+
#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__
|
108 |
+
#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__
|
109 |
+
|
110 |
+
#include <string>
|
111 |
+
#include <google/protobuf/stubs/common.h>
|
112 |
+
|
113 |
+
namespace google {
|
114 |
+
|
115 |
+
namespace protobuf {
|
116 |
+
namespace io {
|
117 |
+
|
118 |
+
// Defined in this file.
|
119 |
+
class ZeroCopyInputStream;
|
120 |
+
class ZeroCopyOutputStream;
|
121 |
+
|
122 |
+
// Abstract interface similar to an input stream but designed to minimize
|
123 |
+
// copying.
|
124 |
+
class LIBPROTOBUF_EXPORT ZeroCopyInputStream {
|
125 |
+
public:
|
126 |
+
ZeroCopyInputStream() {}
|
127 |
+
virtual ~ZeroCopyInputStream() {}
|
128 |
+
|
129 |
+
// Obtains a chunk of data from the stream.
|
130 |
+
//
|
131 |
+
// Preconditions:
|
132 |
+
// * "size" and "data" are not NULL.
|
133 |
+
//
|
134 |
+
// Postconditions:
|
135 |
+
// * If the returned value is false, there is no more data to return or
|
136 |
+
// an error occurred. All errors are permanent.
|
137 |
+
// * Otherwise, "size" points to the actual number of bytes read and "data"
|
138 |
+
// points to a pointer to a buffer containing these bytes.
|
139 |
+
// * Ownership of this buffer remains with the stream, and the buffer
|
140 |
+
// remains valid only until some other method of the stream is called
|
141 |
+
// or the stream is destroyed.
|
142 |
+
// * It is legal for the returned buffer to have zero size, as long
|
143 |
+
// as repeatedly calling Next() eventually yields a buffer with non-zero
|
144 |
+
// size.
|
145 |
+
virtual bool Next(const void** data, int* size) = 0;
|
146 |
+
|
147 |
+
// Backs up a number of bytes, so that the next call to Next() returns
|
148 |
+
// data again that was already returned by the last call to Next(). This
|
149 |
+
// is useful when writing procedures that are only supposed to read up
|
150 |
+
// to a certain point in the input, then return. If Next() returns a
|
151 |
+
// buffer that goes beyond what you wanted to read, you can use BackUp()
|
152 |
+
// to return to the point where you intended to finish.
|
153 |
+
//
|
154 |
+
// Preconditions:
|
155 |
+
// * The last method called must have been Next().
|
156 |
+
// * count must be less than or equal to the size of the last buffer
|
157 |
+
// returned by Next().
|
158 |
+
//
|
159 |
+
// Postconditions:
|
160 |
+
// * The last "count" bytes of the last buffer returned by Next() will be
|
161 |
+
// pushed back into the stream. Subsequent calls to Next() will return
|
162 |
+
// the same data again before producing new data.
|
163 |
+
virtual void BackUp(int count) = 0;
|
164 |
+
|
165 |
+
// Skips a number of bytes. Returns false if the end of the stream is
|
166 |
+
// reached or some input error occurred. In the end-of-stream case, the
|
167 |
+
// stream is advanced to the end of the stream (so ByteCount() will return
|
168 |
+
// the total size of the stream).
|
169 |
+
virtual bool Skip(int count) = 0;
|
170 |
+
|
171 |
+
// Returns the total number of bytes read since this object was created.
|
172 |
+
virtual int64 ByteCount() const = 0;
|
173 |
+
|
174 |
+
|
175 |
+
private:
|
176 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyInputStream);
|
177 |
+
};
|
178 |
+
|
179 |
+
// Abstract interface similar to an output stream but designed to minimize
|
180 |
+
// copying.
|
181 |
+
class LIBPROTOBUF_EXPORT ZeroCopyOutputStream {
|
182 |
+
public:
|
183 |
+
ZeroCopyOutputStream() {}
|
184 |
+
virtual ~ZeroCopyOutputStream() {}
|
185 |
+
|
186 |
+
// Obtains a buffer into which data can be written. Any data written
|
187 |
+
// into this buffer will eventually (maybe instantly, maybe later on)
|
188 |
+
// be written to the output.
|
189 |
+
//
|
190 |
+
// Preconditions:
|
191 |
+
// * "size" and "data" are not NULL.
|
192 |
+
//
|
193 |
+
// Postconditions:
|
194 |
+
// * If the returned value is false, an error occurred. All errors are
|
195 |
+
// permanent.
|
196 |
+
// * Otherwise, "size" points to the actual number of bytes in the buffer
|
197 |
+
// and "data" points to the buffer.
|
198 |
+
// * Ownership of this buffer remains with the stream, and the buffer
|
199 |
+
// remains valid only until some other method of the stream is called
|
200 |
+
// or the stream is destroyed.
|
201 |
+
// * Any data which the caller stores in this buffer will eventually be
|
202 |
+
// written to the output (unless BackUp() is called).
|
203 |
+
// * It is legal for the returned buffer to have zero size, as long
|
204 |
+
// as repeatedly calling Next() eventually yields a buffer with non-zero
|
205 |
+
// size.
|
206 |
+
virtual bool Next(void** data, int* size) = 0;
|
207 |
+
|
208 |
+
// Backs up a number of bytes, so that the end of the last buffer returned
|
209 |
+
// by Next() is not actually written. This is needed when you finish
|
210 |
+
// writing all the data you want to write, but the last buffer was bigger
|
211 |
+
// than you needed. You don't want to write a bunch of garbage after the
|
212 |
+
// end of your data, so you use BackUp() to back up.
|
213 |
+
//
|
214 |
+
// Preconditions:
|
215 |
+
// * The last method called must have been Next().
|
216 |
+
// * count must be less than or equal to the size of the last buffer
|
217 |
+
// returned by Next().
|
218 |
+
// * The caller must not have written anything to the last "count" bytes
|
219 |
+
// of that buffer.
|
220 |
+
//
|
221 |
+
// Postconditions:
|
222 |
+
// * The last "count" bytes of the last buffer returned by Next() will be
|
223 |
+
// ignored.
|
224 |
+
virtual void BackUp(int count) = 0;
|
225 |
+
|
226 |
+
// Returns the total number of bytes written since this object was created.
|
227 |
+
virtual int64 ByteCount() const = 0;
|
228 |
+
|
229 |
+
// Write a given chunk of data to the output. Some output streams may
|
230 |
+
// implement this in a way that avoids copying. Check AllowsAliasing() before
|
231 |
+
// calling WriteAliasedRaw(). It will GOOGLE_CHECK fail if WriteAliasedRaw() is
|
232 |
+
// called on a stream that does not allow aliasing.
|
233 |
+
//
|
234 |
+
// NOTE: It is caller's responsibility to ensure that the chunk of memory
|
235 |
+
// remains live until all of the data has been consumed from the stream.
|
236 |
+
virtual bool WriteAliasedRaw(const void* data, int size);
|
237 |
+
virtual bool AllowsAliasing() const { return false; }
|
238 |
+
|
239 |
+
|
240 |
+
private:
|
241 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyOutputStream);
|
242 |
+
};
|
243 |
+
|
244 |
+
} // namespace io
|
245 |
+
} // namespace protobuf
|
246 |
+
|
247 |
+
} // namespace google
|
248 |
+
#endif // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/io/zero_copy_stream_impl_lite.h
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// This file contains common implementations of the interfaces defined in
|
36 |
+
// zero_copy_stream.h which are included in the "lite" protobuf library.
|
37 |
+
// These implementations cover I/O on raw arrays and strings, as well as
|
38 |
+
// adaptors which make it easy to implement streams based on traditional
|
39 |
+
// streams. Of course, many users will probably want to write their own
|
40 |
+
// implementations of these interfaces specific to the particular I/O
|
41 |
+
// abstractions they prefer to use, but these should cover the most common
|
42 |
+
// cases.
|
43 |
+
|
44 |
+
#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
|
45 |
+
#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
|
46 |
+
|
47 |
+
#include <memory>
|
48 |
+
#include <string>
|
49 |
+
#include <iosfwd>
|
50 |
+
#include <google/protobuf/io/zero_copy_stream.h>
|
51 |
+
#include <google/protobuf/stubs/callback.h>
|
52 |
+
#include <google/protobuf/stubs/common.h>
|
53 |
+
#include <google/protobuf/stubs/stl_util.h>
|
54 |
+
|
55 |
+
|
56 |
+
namespace google {
|
57 |
+
namespace protobuf {
|
58 |
+
namespace io {
|
59 |
+
|
60 |
+
// ===================================================================
|
61 |
+
|
62 |
+
// A ZeroCopyInputStream backed by an in-memory array of bytes.
|
63 |
+
class LIBPROTOBUF_EXPORT ArrayInputStream : public ZeroCopyInputStream {
|
64 |
+
public:
|
65 |
+
// Create an InputStream that returns the bytes pointed to by "data".
|
66 |
+
// "data" remains the property of the caller but must remain valid until
|
67 |
+
// the stream is destroyed. If a block_size is given, calls to Next()
|
68 |
+
// will return data blocks no larger than the given size. Otherwise, the
|
69 |
+
// first call to Next() returns the entire array. block_size is mainly
|
70 |
+
// useful for testing; in production you would probably never want to set
|
71 |
+
// it.
|
72 |
+
ArrayInputStream(const void* data, int size, int block_size = -1);
|
73 |
+
|
74 |
+
// implements ZeroCopyInputStream ----------------------------------
|
75 |
+
bool Next(const void** data, int* size);
|
76 |
+
void BackUp(int count);
|
77 |
+
bool Skip(int count);
|
78 |
+
int64 ByteCount() const;
|
79 |
+
|
80 |
+
|
81 |
+
private:
|
82 |
+
const uint8* const data_; // The byte array.
|
83 |
+
const int size_; // Total size of the array.
|
84 |
+
const int block_size_; // How many bytes to return at a time.
|
85 |
+
|
86 |
+
int position_;
|
87 |
+
int last_returned_size_; // How many bytes we returned last time Next()
|
88 |
+
// was called (used for error checking only).
|
89 |
+
|
90 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayInputStream);
|
91 |
+
};
|
92 |
+
|
93 |
+
// ===================================================================
|
94 |
+
|
95 |
+
// A ZeroCopyOutputStream backed by an in-memory array of bytes.
|
96 |
+
class LIBPROTOBUF_EXPORT ArrayOutputStream : public ZeroCopyOutputStream {
|
97 |
+
public:
|
98 |
+
// Create an OutputStream that writes to the bytes pointed to by "data".
|
99 |
+
// "data" remains the property of the caller but must remain valid until
|
100 |
+
// the stream is destroyed. If a block_size is given, calls to Next()
|
101 |
+
// will return data blocks no larger than the given size. Otherwise, the
|
102 |
+
// first call to Next() returns the entire array. block_size is mainly
|
103 |
+
// useful for testing; in production you would probably never want to set
|
104 |
+
// it.
|
105 |
+
ArrayOutputStream(void* data, int size, int block_size = -1);
|
106 |
+
|
107 |
+
// implements ZeroCopyOutputStream ---------------------------------
|
108 |
+
bool Next(void** data, int* size);
|
109 |
+
void BackUp(int count);
|
110 |
+
int64 ByteCount() const;
|
111 |
+
|
112 |
+
private:
|
113 |
+
uint8* const data_; // The byte array.
|
114 |
+
const int size_; // Total size of the array.
|
115 |
+
const int block_size_; // How many bytes to return at a time.
|
116 |
+
|
117 |
+
int position_;
|
118 |
+
int last_returned_size_; // How many bytes we returned last time Next()
|
119 |
+
// was called (used for error checking only).
|
120 |
+
|
121 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayOutputStream);
|
122 |
+
};
|
123 |
+
|
124 |
+
// ===================================================================
|
125 |
+
|
126 |
+
// A ZeroCopyOutputStream which appends bytes to a string.
|
127 |
+
class LIBPROTOBUF_EXPORT StringOutputStream : public ZeroCopyOutputStream {
|
128 |
+
public:
|
129 |
+
// Create a StringOutputStream which appends bytes to the given string.
|
130 |
+
// The string remains property of the caller, but it is mutated in arbitrary
|
131 |
+
// ways and MUST NOT be accessed in any way until you're done with the
|
132 |
+
// stream. Either be sure there's no further usage, or (safest) destroy the
|
133 |
+
// stream before using the contents.
|
134 |
+
//
|
135 |
+
// Hint: If you call target->reserve(n) before creating the stream,
|
136 |
+
// the first call to Next() will return at least n bytes of buffer
|
137 |
+
// space.
|
138 |
+
explicit StringOutputStream(string* target);
|
139 |
+
|
140 |
+
// implements ZeroCopyOutputStream ---------------------------------
|
141 |
+
bool Next(void** data, int* size);
|
142 |
+
void BackUp(int count);
|
143 |
+
int64 ByteCount() const;
|
144 |
+
|
145 |
+
protected:
|
146 |
+
void SetString(string* target);
|
147 |
+
|
148 |
+
private:
|
149 |
+
static const int kMinimumSize = 16;
|
150 |
+
|
151 |
+
string* target_;
|
152 |
+
|
153 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringOutputStream);
|
154 |
+
};
|
155 |
+
|
156 |
+
// Note: There is no StringInputStream. Instead, just create an
|
157 |
+
// ArrayInputStream as follows:
|
158 |
+
// ArrayInputStream input(str.data(), str.size());
|
159 |
+
|
160 |
+
// ===================================================================
|
161 |
+
|
162 |
+
// A generic traditional input stream interface.
|
163 |
+
//
|
164 |
+
// Lots of traditional input streams (e.g. file descriptors, C stdio
|
165 |
+
// streams, and C++ iostreams) expose an interface where every read
|
166 |
+
// involves copying bytes into a buffer. If you want to take such an
|
167 |
+
// interface and make a ZeroCopyInputStream based on it, simply implement
|
168 |
+
// CopyingInputStream and then use CopyingInputStreamAdaptor.
|
169 |
+
//
|
170 |
+
// CopyingInputStream implementations should avoid buffering if possible.
|
171 |
+
// CopyingInputStreamAdaptor does its own buffering and will read data
|
172 |
+
// in large blocks.
|
173 |
+
class LIBPROTOBUF_EXPORT CopyingInputStream {
|
174 |
+
public:
|
175 |
+
virtual ~CopyingInputStream() {}
|
176 |
+
|
177 |
+
// Reads up to "size" bytes into the given buffer. Returns the number of
|
178 |
+
// bytes read. Read() waits until at least one byte is available, or
|
179 |
+
// returns zero if no bytes will ever become available (EOF), or -1 if a
|
180 |
+
// permanent read error occurred.
|
181 |
+
virtual int Read(void* buffer, int size) = 0;
|
182 |
+
|
183 |
+
// Skips the next "count" bytes of input. Returns the number of bytes
|
184 |
+
// actually skipped. This will always be exactly equal to "count" unless
|
185 |
+
// EOF was reached or a permanent read error occurred.
|
186 |
+
//
|
187 |
+
// The default implementation just repeatedly calls Read() into a scratch
|
188 |
+
// buffer.
|
189 |
+
virtual int Skip(int count);
|
190 |
+
};
|
191 |
+
|
192 |
+
// A ZeroCopyInputStream which reads from a CopyingInputStream. This is
|
193 |
+
// useful for implementing ZeroCopyInputStreams that read from traditional
|
194 |
+
// streams. Note that this class is not really zero-copy.
|
195 |
+
//
|
196 |
+
// If you want to read from file descriptors or C++ istreams, this is
|
197 |
+
// already implemented for you: use FileInputStream or IstreamInputStream
|
198 |
+
// respectively.
|
199 |
+
class LIBPROTOBUF_EXPORT CopyingInputStreamAdaptor : public ZeroCopyInputStream {
|
200 |
+
public:
|
201 |
+
// Creates a stream that reads from the given CopyingInputStream.
|
202 |
+
// If a block_size is given, it specifies the number of bytes that
|
203 |
+
// should be read and returned with each call to Next(). Otherwise,
|
204 |
+
// a reasonable default is used. The caller retains ownership of
|
205 |
+
// copying_stream unless SetOwnsCopyingStream(true) is called.
|
206 |
+
explicit CopyingInputStreamAdaptor(CopyingInputStream* copying_stream,
|
207 |
+
int block_size = -1);
|
208 |
+
~CopyingInputStreamAdaptor();
|
209 |
+
|
210 |
+
// Call SetOwnsCopyingStream(true) to tell the CopyingInputStreamAdaptor to
|
211 |
+
// delete the underlying CopyingInputStream when it is destroyed.
|
212 |
+
void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }
|
213 |
+
|
214 |
+
// implements ZeroCopyInputStream ----------------------------------
|
215 |
+
bool Next(const void** data, int* size);
|
216 |
+
void BackUp(int count);
|
217 |
+
bool Skip(int count);
|
218 |
+
int64 ByteCount() const;
|
219 |
+
|
220 |
+
private:
|
221 |
+
// Insures that buffer_ is not NULL.
|
222 |
+
void AllocateBufferIfNeeded();
|
223 |
+
// Frees the buffer and resets buffer_used_.
|
224 |
+
void FreeBuffer();
|
225 |
+
|
226 |
+
// The underlying copying stream.
|
227 |
+
CopyingInputStream* copying_stream_;
|
228 |
+
bool owns_copying_stream_;
|
229 |
+
|
230 |
+
// True if we have seen a permenant error from the underlying stream.
|
231 |
+
bool failed_;
|
232 |
+
|
233 |
+
// The current position of copying_stream_, relative to the point where
|
234 |
+
// we started reading.
|
235 |
+
int64 position_;
|
236 |
+
|
237 |
+
// Data is read into this buffer. It may be NULL if no buffer is currently
|
238 |
+
// in use. Otherwise, it points to an array of size buffer_size_.
|
239 |
+
std::unique_ptr<uint8[]> buffer_;
|
240 |
+
const int buffer_size_;
|
241 |
+
|
242 |
+
// Number of valid bytes currently in the buffer (i.e. the size last
|
243 |
+
// returned by Next()). 0 <= buffer_used_ <= buffer_size_.
|
244 |
+
int buffer_used_;
|
245 |
+
|
246 |
+
// Number of bytes in the buffer which were backed up over by a call to
|
247 |
+
// BackUp(). These need to be returned again.
|
248 |
+
// 0 <= backup_bytes_ <= buffer_used_
|
249 |
+
int backup_bytes_;
|
250 |
+
|
251 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingInputStreamAdaptor);
|
252 |
+
};
|
253 |
+
|
254 |
+
// ===================================================================
|
255 |
+
|
256 |
+
// A generic traditional output stream interface.
|
257 |
+
//
|
258 |
+
// Lots of traditional output streams (e.g. file descriptors, C stdio
|
259 |
+
// streams, and C++ iostreams) expose an interface where every write
|
260 |
+
// involves copying bytes from a buffer. If you want to take such an
|
261 |
+
// interface and make a ZeroCopyOutputStream based on it, simply implement
|
262 |
+
// CopyingOutputStream and then use CopyingOutputStreamAdaptor.
|
263 |
+
//
|
264 |
+
// CopyingOutputStream implementations should avoid buffering if possible.
|
265 |
+
// CopyingOutputStreamAdaptor does its own buffering and will write data
|
266 |
+
// in large blocks.
|
267 |
+
class LIBPROTOBUF_EXPORT CopyingOutputStream {
|
268 |
+
public:
|
269 |
+
virtual ~CopyingOutputStream() {}
|
270 |
+
|
271 |
+
// Writes "size" bytes from the given buffer to the output. Returns true
|
272 |
+
// if successful, false on a write error.
|
273 |
+
virtual bool Write(const void* buffer, int size) = 0;
|
274 |
+
};
|
275 |
+
|
276 |
+
// A ZeroCopyOutputStream which writes to a CopyingOutputStream. This is
|
277 |
+
// useful for implementing ZeroCopyOutputStreams that write to traditional
|
278 |
+
// streams. Note that this class is not really zero-copy.
|
279 |
+
//
|
280 |
+
// If you want to write to file descriptors or C++ ostreams, this is
|
281 |
+
// already implemented for you: use FileOutputStream or OstreamOutputStream
|
282 |
+
// respectively.
|
283 |
+
class LIBPROTOBUF_EXPORT CopyingOutputStreamAdaptor : public ZeroCopyOutputStream {
|
284 |
+
public:
|
285 |
+
// Creates a stream that writes to the given Unix file descriptor.
|
286 |
+
// If a block_size is given, it specifies the size of the buffers
|
287 |
+
// that should be returned by Next(). Otherwise, a reasonable default
|
288 |
+
// is used.
|
289 |
+
explicit CopyingOutputStreamAdaptor(CopyingOutputStream* copying_stream,
|
290 |
+
int block_size = -1);
|
291 |
+
~CopyingOutputStreamAdaptor();
|
292 |
+
|
293 |
+
// Writes all pending data to the underlying stream. Returns false if a
|
294 |
+
// write error occurred on the underlying stream. (The underlying
|
295 |
+
// stream itself is not necessarily flushed.)
|
296 |
+
bool Flush();
|
297 |
+
|
298 |
+
// Call SetOwnsCopyingStream(true) to tell the CopyingOutputStreamAdaptor to
|
299 |
+
// delete the underlying CopyingOutputStream when it is destroyed.
|
300 |
+
void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }
|
301 |
+
|
302 |
+
// implements ZeroCopyOutputStream ---------------------------------
|
303 |
+
bool Next(void** data, int* size);
|
304 |
+
void BackUp(int count);
|
305 |
+
int64 ByteCount() const;
|
306 |
+
|
307 |
+
private:
|
308 |
+
// Write the current buffer, if it is present.
|
309 |
+
bool WriteBuffer();
|
310 |
+
// Insures that buffer_ is not NULL.
|
311 |
+
void AllocateBufferIfNeeded();
|
312 |
+
// Frees the buffer.
|
313 |
+
void FreeBuffer();
|
314 |
+
|
315 |
+
// The underlying copying stream.
|
316 |
+
CopyingOutputStream* copying_stream_;
|
317 |
+
bool owns_copying_stream_;
|
318 |
+
|
319 |
+
// True if we have seen a permenant error from the underlying stream.
|
320 |
+
bool failed_;
|
321 |
+
|
322 |
+
// The current position of copying_stream_, relative to the point where
|
323 |
+
// we started writing.
|
324 |
+
int64 position_;
|
325 |
+
|
326 |
+
// Data is written from this buffer. It may be NULL if no buffer is
|
327 |
+
// currently in use. Otherwise, it points to an array of size buffer_size_.
|
328 |
+
std::unique_ptr<uint8[]> buffer_;
|
329 |
+
const int buffer_size_;
|
330 |
+
|
331 |
+
// Number of valid bytes currently in the buffer (i.e. the size last
|
332 |
+
// returned by Next()). When BackUp() is called, we just reduce this.
|
333 |
+
// 0 <= buffer_used_ <= buffer_size_.
|
334 |
+
int buffer_used_;
|
335 |
+
|
336 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingOutputStreamAdaptor);
|
337 |
+
};
|
338 |
+
|
339 |
+
// ===================================================================
|
340 |
+
|
341 |
+
// mutable_string_data() and as_string_data() are workarounds to improve
|
342 |
+
// the performance of writing new data to an existing string. Unfortunately
|
343 |
+
// the methods provided by the string class are suboptimal, and using memcpy()
|
344 |
+
// is mildly annoying because it requires its pointer args to be non-NULL even
|
345 |
+
// if we ask it to copy 0 bytes. Furthermore, string_as_array() has the
|
346 |
+
// property that it always returns NULL if its arg is the empty string, exactly
|
347 |
+
// what we want to avoid if we're using it in conjunction with memcpy()!
|
348 |
+
// With C++11, the desired memcpy() boils down to memcpy(..., &(*s)[0], size),
|
349 |
+
// where s is a string*. Without C++11, &(*s)[0] is not guaranteed to be safe,
|
350 |
+
// so we use string_as_array(), and live with the extra logic that tests whether
|
351 |
+
// *s is empty.
|
352 |
+
|
353 |
+
// Return a pointer to mutable characters underlying the given string. The
|
354 |
+
// return value is valid until the next time the string is resized. We
|
355 |
+
// trust the caller to treat the return value as an array of length s->size().
|
356 |
+
inline char* mutable_string_data(string* s) {
|
357 |
+
#ifdef LANG_CXX11
|
358 |
+
// This should be simpler & faster than string_as_array() because the latter
|
359 |
+
// is guaranteed to return NULL when *s is empty, so it has to check for that.
|
360 |
+
return &(*s)[0];
|
361 |
+
#else
|
362 |
+
return string_as_array(s);
|
363 |
+
#endif
|
364 |
+
}
|
365 |
+
|
366 |
+
// as_string_data(s) is equivalent to
|
367 |
+
// ({ char* p = mutable_string_data(s); make_pair(p, p != NULL); })
|
368 |
+
// Sometimes it's faster: in some scenarios p cannot be NULL, and then the
|
369 |
+
// code can avoid that check.
|
370 |
+
inline std::pair<char*, bool> as_string_data(string* s) {
|
371 |
+
char *p = mutable_string_data(s);
|
372 |
+
#ifdef LANG_CXX11
|
373 |
+
return std::make_pair(p, true);
|
374 |
+
#else
|
375 |
+
return std::make_pair(p, p != NULL);
|
376 |
+
#endif
|
377 |
+
}
|
378 |
+
|
379 |
+
} // namespace io
|
380 |
+
} // namespace protobuf
|
381 |
+
|
382 |
+
} // namespace google
|
383 |
+
#endif // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map.h
ADDED
@@ -0,0 +1,1219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// This file defines the map container and its helpers to support protobuf maps.
|
32 |
+
//
|
33 |
+
// The Map and MapIterator types are provided by this header file.
|
34 |
+
// Please avoid using other types defined here, unless they are public
|
35 |
+
// types within Map or MapIterator, such as Map::value_type.
|
36 |
+
|
37 |
+
#ifndef GOOGLE_PROTOBUF_MAP_H__
|
38 |
+
#define GOOGLE_PROTOBUF_MAP_H__
|
39 |
+
|
40 |
+
#include <initializer_list>
|
41 |
+
#include <iterator>
|
42 |
+
#include <limits> // To support Visual Studio 2008
|
43 |
+
#include <set>
|
44 |
+
#include <utility>
|
45 |
+
|
46 |
+
#include <google/protobuf/stubs/common.h>
|
47 |
+
#include <google/protobuf/arena.h>
|
48 |
+
#include <google/protobuf/generated_enum_util.h>
|
49 |
+
#include <google/protobuf/map_type_handler.h>
|
50 |
+
#include <google/protobuf/stubs/hash.h>
|
51 |
+
|
52 |
+
namespace google {
|
53 |
+
namespace protobuf {
|
54 |
+
|
55 |
+
template <typename Key, typename T>
|
56 |
+
class Map;
|
57 |
+
|
58 |
+
class MapIterator;
|
59 |
+
|
60 |
+
template <typename Enum> struct is_proto_enum;
|
61 |
+
|
62 |
+
namespace internal {
|
63 |
+
template <typename Derived, typename Key, typename T,
|
64 |
+
WireFormatLite::FieldType key_wire_type,
|
65 |
+
WireFormatLite::FieldType value_wire_type, int default_enum_value>
|
66 |
+
class MapFieldLite;
|
67 |
+
|
68 |
+
template <typename Derived, typename Key, typename T,
|
69 |
+
WireFormatLite::FieldType key_wire_type,
|
70 |
+
WireFormatLite::FieldType value_wire_type, int default_enum_value>
|
71 |
+
class MapField;
|
72 |
+
|
73 |
+
template <typename Key, typename T>
|
74 |
+
class TypeDefinedMapFieldBase;
|
75 |
+
|
76 |
+
class DynamicMapField;
|
77 |
+
|
78 |
+
class GeneratedMessageReflection;
|
79 |
+
} // namespace internal
|
80 |
+
|
81 |
+
// This is the class for google::protobuf::Map's internal value_type. Instead of using
|
82 |
+
// std::pair as value_type, we use this class which provides us more control of
|
83 |
+
// its process of construction and destruction.
|
84 |
+
template <typename Key, typename T>
|
85 |
+
class MapPair {
|
86 |
+
public:
|
87 |
+
typedef const Key first_type;
|
88 |
+
typedef T second_type;
|
89 |
+
|
90 |
+
MapPair(const Key& other_first, const T& other_second)
|
91 |
+
: first(other_first), second(other_second) {}
|
92 |
+
explicit MapPair(const Key& other_first) : first(other_first), second() {}
|
93 |
+
MapPair(const MapPair& other)
|
94 |
+
: first(other.first), second(other.second) {}
|
95 |
+
|
96 |
+
~MapPair() {}
|
97 |
+
|
98 |
+
// Implicitly convertible to std::pair of compatible types.
|
99 |
+
template <typename T1, typename T2>
|
100 |
+
operator std::pair<T1, T2>() const {
|
101 |
+
return std::pair<T1, T2>(first, second);
|
102 |
+
}
|
103 |
+
|
104 |
+
const Key first;
|
105 |
+
T second;
|
106 |
+
|
107 |
+
private:
|
108 |
+
friend class ::google::protobuf::Arena;
|
109 |
+
friend class Map<Key, T>;
|
110 |
+
};
|
111 |
+
|
112 |
+
// google::protobuf::Map is an associative container type used to store protobuf map
|
113 |
+
// fields. Each Map instance may or may not use a different hash function, a
|
114 |
+
// different iteration order, and so on. E.g., please don't examine
|
115 |
+
// implementation details to decide if the following would work:
|
116 |
+
// Map<int, int> m0, m1;
|
117 |
+
// m0[0] = m1[0] = m0[1] = m1[1] = 0;
|
118 |
+
// assert(m0.begin()->first == m1.begin()->first); // Bug!
|
119 |
+
//
|
120 |
+
// Map's interface is similar to std::unordered_map, except that Map is not
|
121 |
+
// designed to play well with exceptions.
|
122 |
+
template <typename Key, typename T>
|
123 |
+
class Map {
|
124 |
+
public:
|
125 |
+
typedef Key key_type;
|
126 |
+
typedef T mapped_type;
|
127 |
+
typedef MapPair<Key, T> value_type;
|
128 |
+
|
129 |
+
typedef value_type* pointer;
|
130 |
+
typedef const value_type* const_pointer;
|
131 |
+
typedef value_type& reference;
|
132 |
+
typedef const value_type& const_reference;
|
133 |
+
|
134 |
+
typedef size_t size_type;
|
135 |
+
typedef hash<Key> hasher;
|
136 |
+
|
137 |
+
Map() : arena_(NULL), default_enum_value_(0) { Init(); }
|
138 |
+
explicit Map(Arena* arena) : arena_(arena), default_enum_value_(0) { Init(); }
|
139 |
+
|
140 |
+
Map(const Map& other)
|
141 |
+
: arena_(NULL), default_enum_value_(other.default_enum_value_) {
|
142 |
+
Init();
|
143 |
+
insert(other.begin(), other.end());
|
144 |
+
}
|
145 |
+
|
146 |
+
Map(Map&& other) noexcept : Map() {
|
147 |
+
if (other.arena_) {
|
148 |
+
*this = other;
|
149 |
+
} else {
|
150 |
+
swap(other);
|
151 |
+
}
|
152 |
+
}
|
153 |
+
Map& operator=(Map&& other) noexcept {
|
154 |
+
if (this != &other) {
|
155 |
+
if (arena_ != other.arena_) {
|
156 |
+
*this = other;
|
157 |
+
} else {
|
158 |
+
swap(other);
|
159 |
+
}
|
160 |
+
}
|
161 |
+
return *this;
|
162 |
+
}
|
163 |
+
|
164 |
+
template <class InputIt>
|
165 |
+
Map(const InputIt& first, const InputIt& last)
|
166 |
+
: arena_(NULL), default_enum_value_(0) {
|
167 |
+
Init();
|
168 |
+
insert(first, last);
|
169 |
+
}
|
170 |
+
|
171 |
+
~Map() {
|
172 |
+
clear();
|
173 |
+
if (arena_ == NULL) {
|
174 |
+
delete elements_;
|
175 |
+
}
|
176 |
+
}
|
177 |
+
|
178 |
+
private:
|
179 |
+
void Init() {
|
180 |
+
elements_ = Arena::Create<InnerMap>(arena_, 0u, hasher(), Allocator(arena_));
|
181 |
+
}
|
182 |
+
|
183 |
+
// re-implement std::allocator to use arena allocator for memory allocation.
|
184 |
+
// Used for google::protobuf::Map implementation. Users should not use this class
|
185 |
+
// directly.
|
186 |
+
template <typename U>
|
187 |
+
class MapAllocator {
|
188 |
+
public:
|
189 |
+
typedef U value_type;
|
190 |
+
typedef value_type* pointer;
|
191 |
+
typedef const value_type* const_pointer;
|
192 |
+
typedef value_type& reference;
|
193 |
+
typedef const value_type& const_reference;
|
194 |
+
typedef size_t size_type;
|
195 |
+
typedef ptrdiff_t difference_type;
|
196 |
+
|
197 |
+
MapAllocator() : arena_(NULL) {}
|
198 |
+
explicit MapAllocator(Arena* arena) : arena_(arena) {}
|
199 |
+
template <typename X>
|
200 |
+
MapAllocator(const MapAllocator<X>& allocator)
|
201 |
+
: arena_(allocator.arena()) {}
|
202 |
+
|
203 |
+
pointer allocate(size_type n, const void* /* hint */ = 0) {
|
204 |
+
// If arena is not given, malloc needs to be called which doesn't
|
205 |
+
// construct element object.
|
206 |
+
if (arena_ == NULL) {
|
207 |
+
return static_cast<pointer>(::operator new(n * sizeof(value_type)));
|
208 |
+
} else {
|
209 |
+
return reinterpret_cast<pointer>(
|
210 |
+
Arena::CreateArray<uint8>(arena_, n * sizeof(value_type)));
|
211 |
+
}
|
212 |
+
}
|
213 |
+
|
214 |
+
void deallocate(pointer p, size_type n) {
|
215 |
+
if (arena_ == NULL) {
|
216 |
+
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
|
217 |
+
::operator delete(p, n * sizeof(value_type));
|
218 |
+
#else
|
219 |
+
(void)n;
|
220 |
+
::operator delete(p);
|
221 |
+
#endif
|
222 |
+
}
|
223 |
+
}
|
224 |
+
|
225 |
+
#if __cplusplus >= 201103L && !defined(GOOGLE_PROTOBUF_OS_APPLE) && \
|
226 |
+
!defined(GOOGLE_PROTOBUF_OS_NACL) && \
|
227 |
+
!defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
|
228 |
+
template<class NodeType, class... Args>
|
229 |
+
void construct(NodeType* p, Args&&... args) {
|
230 |
+
// Clang 3.6 doesn't compile static casting to void* directly. (Issue
|
231 |
+
// #1266) According C++ standard 5.2.9/1: "The static_cast operator shall
|
232 |
+
// not cast away constness". So first the maybe const pointer is casted to
|
233 |
+
// const void* and after the const void* is const casted.
|
234 |
+
new (const_cast<void*>(static_cast<const void*>(p)))
|
235 |
+
NodeType(std::forward<Args>(args)...);
|
236 |
+
}
|
237 |
+
|
238 |
+
template<class NodeType>
|
239 |
+
void destroy(NodeType* p) {
|
240 |
+
p->~NodeType();
|
241 |
+
}
|
242 |
+
#else
|
243 |
+
void construct(pointer p, const_reference t) { new (p) value_type(t); }
|
244 |
+
|
245 |
+
void destroy(pointer p) { p->~value_type(); }
|
246 |
+
#endif
|
247 |
+
|
248 |
+
template <typename X>
|
249 |
+
struct rebind {
|
250 |
+
typedef MapAllocator<X> other;
|
251 |
+
};
|
252 |
+
|
253 |
+
template <typename X>
|
254 |
+
bool operator==(const MapAllocator<X>& other) const {
|
255 |
+
return arena_ == other.arena_;
|
256 |
+
}
|
257 |
+
|
258 |
+
template <typename X>
|
259 |
+
bool operator!=(const MapAllocator<X>& other) const {
|
260 |
+
return arena_ != other.arena_;
|
261 |
+
}
|
262 |
+
|
263 |
+
// To support Visual Studio 2008
|
264 |
+
size_type max_size() const {
|
265 |
+
// parentheses around (std::...:max) prevents macro warning of max()
|
266 |
+
return (std::numeric_limits<size_type>::max)();
|
267 |
+
}
|
268 |
+
|
269 |
+
// To support gcc-4.4, which does not properly
|
270 |
+
// support templated friend classes
|
271 |
+
Arena* arena() const {
|
272 |
+
return arena_;
|
273 |
+
}
|
274 |
+
|
275 |
+
private:
|
276 |
+
typedef void DestructorSkippable_;
|
277 |
+
Arena* const arena_;
|
278 |
+
};
|
279 |
+
|
280 |
+
// InnerMap's key type is Key and its value type is value_type*. We use a
|
281 |
+
// custom class here and for Node, below, to ensure that k_ is at offset 0,
|
282 |
+
// allowing safe conversion from pointer to Node to pointer to Key, and vice
|
283 |
+
// versa when appropriate.
|
284 |
+
class KeyValuePair {
|
285 |
+
public:
|
286 |
+
KeyValuePair(const Key& k, value_type* v) : k_(k), v_(v) {}
|
287 |
+
|
288 |
+
const Key& key() const { return k_; }
|
289 |
+
Key& key() { return k_; }
|
290 |
+
value_type* value() const { return v_; }
|
291 |
+
value_type*& value() { return v_; }
|
292 |
+
|
293 |
+
private:
|
294 |
+
Key k_;
|
295 |
+
value_type* v_;
|
296 |
+
};
|
297 |
+
|
298 |
+
typedef MapAllocator<KeyValuePair> Allocator;
|
299 |
+
|
300 |
+
// InnerMap is a generic hash-based map. It doesn't contain any
|
301 |
+
// protocol-buffer-specific logic. It is a chaining hash map with the
|
302 |
+
// additional feature that some buckets can be converted to use an ordered
|
303 |
+
// container. This ensures O(lg n) bounds on find, insert, and erase, while
|
304 |
+
// avoiding the overheads of ordered containers most of the time.
|
305 |
+
//
|
306 |
+
// The implementation doesn't need the full generality of unordered_map,
|
307 |
+
// and it doesn't have it. More bells and whistles can be added as needed.
|
308 |
+
// Some implementation details:
|
309 |
+
// 1. The hash function has type hasher and the equality function
|
310 |
+
// equal_to<Key>. We inherit from hasher to save space
|
311 |
+
// (empty-base-class optimization).
|
312 |
+
// 2. The number of buckets is a power of two.
|
313 |
+
// 3. Buckets are converted to trees in pairs: if we convert bucket b then
|
314 |
+
// buckets b and b^1 will share a tree. Invariant: buckets b and b^1 have
|
315 |
+
// the same non-NULL value iff they are sharing a tree. (An alternative
|
316 |
+
// implementation strategy would be to have a tag bit per bucket.)
|
317 |
+
// 4. As is typical for hash_map and such, the Keys and Values are always
|
318 |
+
// stored in linked list nodes. Pointers to elements are never invalidated
|
319 |
+
// until the element is deleted.
|
320 |
+
// 5. The trees' payload type is pointer to linked-list node. Tree-converting
|
321 |
+
// a bucket doesn't copy Key-Value pairs.
|
322 |
+
// 6. Once we've tree-converted a bucket, it is never converted back. However,
|
323 |
+
// the items a tree contains may wind up assigned to trees or lists upon a
|
324 |
+
// rehash.
|
325 |
+
// 7. The code requires no C++ features from C++11 or later.
|
326 |
+
// 8. Mutations to a map do not invalidate the map's iterators, pointers to
|
327 |
+
// elements, or references to elements.
|
328 |
+
// 9. Except for erase(iterator), any non-const method can reorder iterators.
|
329 |
+
class InnerMap : private hasher {
|
330 |
+
public:
|
331 |
+
typedef value_type* Value;
|
332 |
+
|
333 |
+
InnerMap(size_type n, hasher h, Allocator alloc)
|
334 |
+
: hasher(h),
|
335 |
+
num_elements_(0),
|
336 |
+
seed_(Seed()),
|
337 |
+
table_(NULL),
|
338 |
+
alloc_(alloc) {
|
339 |
+
n = TableSize(n);
|
340 |
+
table_ = CreateEmptyTable(n);
|
341 |
+
num_buckets_ = index_of_first_non_null_ = n;
|
342 |
+
}
|
343 |
+
|
344 |
+
~InnerMap() {
|
345 |
+
if (table_ != NULL) {
|
346 |
+
clear();
|
347 |
+
Dealloc<void*>(table_, num_buckets_);
|
348 |
+
}
|
349 |
+
}
|
350 |
+
|
351 |
+
private:
|
352 |
+
enum { kMinTableSize = 8 };
|
353 |
+
|
354 |
+
// Linked-list nodes, as one would expect for a chaining hash table.
|
355 |
+
struct Node {
|
356 |
+
KeyValuePair kv;
|
357 |
+
Node* next;
|
358 |
+
};
|
359 |
+
|
360 |
+
// This is safe only if the given pointer is known to point to a Key that is
|
361 |
+
// part of a Node.
|
362 |
+
static Node* NodePtrFromKeyPtr(Key* k) {
|
363 |
+
return reinterpret_cast<Node*>(k);
|
364 |
+
}
|
365 |
+
|
366 |
+
static Key* KeyPtrFromNodePtr(Node* node) { return &node->kv.key(); }
|
367 |
+
|
368 |
+
// Trees. The payload type is pointer to Key, so that we can query the tree
|
369 |
+
// with Keys that are not in any particular data structure. When we insert,
|
370 |
+
// though, the pointer is always pointing to a Key that is inside a Node.
|
371 |
+
struct KeyCompare {
|
372 |
+
bool operator()(const Key* n0, const Key* n1) const { return *n0 < *n1; }
|
373 |
+
};
|
374 |
+
typedef typename Allocator::template rebind<Key*>::other KeyPtrAllocator;
|
375 |
+
typedef std::set<Key*, KeyCompare, KeyPtrAllocator> Tree;
|
376 |
+
typedef typename Tree::iterator TreeIterator;
|
377 |
+
|
378 |
+
// iterator and const_iterator are instantiations of iterator_base.
|
379 |
+
template <typename KeyValueType>
|
380 |
+
struct iterator_base {
|
381 |
+
typedef KeyValueType& reference;
|
382 |
+
typedef KeyValueType* pointer;
|
383 |
+
|
384 |
+
// Invariants:
|
385 |
+
// node_ is always correct. This is handy because the most common
|
386 |
+
// operations are operator* and operator-> and they only use node_.
|
387 |
+
// When node_ is set to a non-NULL value, all the other non-const fields
|
388 |
+
// are updated to be correct also, but those fields can become stale
|
389 |
+
// if the underlying map is modified. When those fields are needed they
|
390 |
+
// are rechecked, and updated if necessary.
|
391 |
+
iterator_base() : node_(NULL), m_(NULL), bucket_index_(0) {}
|
392 |
+
|
393 |
+
explicit iterator_base(const InnerMap* m) : m_(m) {
|
394 |
+
SearchFrom(m->index_of_first_non_null_);
|
395 |
+
}
|
396 |
+
|
397 |
+
// Any iterator_base can convert to any other. This is overkill, and we
|
398 |
+
// rely on the enclosing class to use it wisely. The standard "iterator
|
399 |
+
// can convert to const_iterator" is OK but the reverse direction is not.
|
400 |
+
template <typename U>
|
401 |
+
explicit iterator_base(const iterator_base<U>& it)
|
402 |
+
: node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {}
|
403 |
+
|
404 |
+
iterator_base(Node* n, const InnerMap* m, size_type index)
|
405 |
+
: node_(n), m_(m), bucket_index_(index) {}
|
406 |
+
|
407 |
+
iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index)
|
408 |
+
: node_(NodePtrFromKeyPtr(*tree_it)), m_(m), bucket_index_(index) {
|
409 |
+
// Invariant: iterators that use buckets with trees have an even
|
410 |
+
// bucket_index_.
|
411 |
+
GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0);
|
412 |
+
}
|
413 |
+
|
414 |
+
// Advance through buckets, looking for the first that isn't empty.
|
415 |
+
// If nothing non-empty is found then leave node_ == NULL.
|
416 |
+
void SearchFrom(size_type start_bucket) {
|
417 |
+
GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
|
418 |
+
m_->table_[m_->index_of_first_non_null_] != NULL);
|
419 |
+
node_ = NULL;
|
420 |
+
for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_;
|
421 |
+
bucket_index_++) {
|
422 |
+
if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
|
423 |
+
node_ = static_cast<Node*>(m_->table_[bucket_index_]);
|
424 |
+
break;
|
425 |
+
} else if (m_->TableEntryIsTree(bucket_index_)) {
|
426 |
+
Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
|
427 |
+
GOOGLE_DCHECK(!tree->empty());
|
428 |
+
node_ = NodePtrFromKeyPtr(*tree->begin());
|
429 |
+
break;
|
430 |
+
}
|
431 |
+
}
|
432 |
+
}
|
433 |
+
|
434 |
+
reference operator*() const { return node_->kv; }
|
435 |
+
pointer operator->() const { return &(operator*()); }
|
436 |
+
|
437 |
+
friend bool operator==(const iterator_base& a, const iterator_base& b) {
|
438 |
+
return a.node_ == b.node_;
|
439 |
+
}
|
440 |
+
friend bool operator!=(const iterator_base& a, const iterator_base& b) {
|
441 |
+
return a.node_ != b.node_;
|
442 |
+
}
|
443 |
+
|
444 |
+
iterator_base& operator++() {
|
445 |
+
if (node_->next == NULL) {
|
446 |
+
TreeIterator tree_it;
|
447 |
+
const bool is_list = revalidate_if_necessary(&tree_it);
|
448 |
+
if (is_list) {
|
449 |
+
SearchFrom(bucket_index_ + 1);
|
450 |
+
} else {
|
451 |
+
GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0);
|
452 |
+
Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
|
453 |
+
if (++tree_it == tree->end()) {
|
454 |
+
SearchFrom(bucket_index_ + 2);
|
455 |
+
} else {
|
456 |
+
node_ = NodePtrFromKeyPtr(*tree_it);
|
457 |
+
}
|
458 |
+
}
|
459 |
+
} else {
|
460 |
+
node_ = node_->next;
|
461 |
+
}
|
462 |
+
return *this;
|
463 |
+
}
|
464 |
+
|
465 |
+
iterator_base operator++(int /* unused */) {
|
466 |
+
iterator_base tmp = *this;
|
467 |
+
++*this;
|
468 |
+
return tmp;
|
469 |
+
}
|
470 |
+
|
471 |
+
// Assumes node_ and m_ are correct and non-NULL, but other fields may be
|
472 |
+
// stale. Fix them as needed. Then return true iff node_ points to a
|
473 |
+
// Node in a list. If false is returned then *it is modified to be
|
474 |
+
// a valid iterator for node_.
|
475 |
+
bool revalidate_if_necessary(TreeIterator* it) {
|
476 |
+
GOOGLE_DCHECK(node_ != NULL && m_ != NULL);
|
477 |
+
// Force bucket_index_ to be in range.
|
478 |
+
bucket_index_ &= (m_->num_buckets_ - 1);
|
479 |
+
// Common case: the bucket we think is relevant points to node_.
|
480 |
+
if (m_->table_[bucket_index_] == static_cast<void*>(node_))
|
481 |
+
return true;
|
482 |
+
// Less common: the bucket is a linked list with node_ somewhere in it,
|
483 |
+
// but not at the head.
|
484 |
+
if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
|
485 |
+
Node* l = static_cast<Node*>(m_->table_[bucket_index_]);
|
486 |
+
while ((l = l->next) != NULL) {
|
487 |
+
if (l == node_) {
|
488 |
+
return true;
|
489 |
+
}
|
490 |
+
}
|
491 |
+
}
|
492 |
+
// Well, bucket_index_ still might be correct, but probably
|
493 |
+
// not. Revalidate just to be sure. This case is rare enough that we
|
494 |
+
// don't worry about potential optimizations, such as having a custom
|
495 |
+
// find-like method that compares Node* instead of const Key&.
|
496 |
+
iterator_base i(m_->find(*KeyPtrFromNodePtr(node_), it));
|
497 |
+
bucket_index_ = i.bucket_index_;
|
498 |
+
return m_->TableEntryIsList(bucket_index_);
|
499 |
+
}
|
500 |
+
|
501 |
+
Node* node_;
|
502 |
+
const InnerMap* m_;
|
503 |
+
size_type bucket_index_;
|
504 |
+
};
|
505 |
+
|
506 |
+
public:
|
507 |
+
typedef iterator_base<KeyValuePair> iterator;
|
508 |
+
typedef iterator_base<const KeyValuePair> const_iterator;
|
509 |
+
|
510 |
+
iterator begin() { return iterator(this); }
|
511 |
+
iterator end() { return iterator(); }
|
512 |
+
const_iterator begin() const { return const_iterator(this); }
|
513 |
+
const_iterator end() const { return const_iterator(); }
|
514 |
+
|
515 |
+
void clear() {
|
516 |
+
for (size_type b = 0; b < num_buckets_; b++) {
|
517 |
+
if (TableEntryIsNonEmptyList(b)) {
|
518 |
+
Node* node = static_cast<Node*>(table_[b]);
|
519 |
+
table_[b] = NULL;
|
520 |
+
do {
|
521 |
+
Node* next = node->next;
|
522 |
+
DestroyNode(node);
|
523 |
+
node = next;
|
524 |
+
} while (node != NULL);
|
525 |
+
} else if (TableEntryIsTree(b)) {
|
526 |
+
Tree* tree = static_cast<Tree*>(table_[b]);
|
527 |
+
GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0);
|
528 |
+
table_[b] = table_[b + 1] = NULL;
|
529 |
+
typename Tree::iterator tree_it = tree->begin();
|
530 |
+
do {
|
531 |
+
Node* node = NodePtrFromKeyPtr(*tree_it);
|
532 |
+
typename Tree::iterator next = tree_it;
|
533 |
+
++next;
|
534 |
+
tree->erase(tree_it);
|
535 |
+
DestroyNode(node);
|
536 |
+
tree_it = next;
|
537 |
+
} while (tree_it != tree->end());
|
538 |
+
DestroyTree(tree);
|
539 |
+
b++;
|
540 |
+
}
|
541 |
+
}
|
542 |
+
num_elements_ = 0;
|
543 |
+
index_of_first_non_null_ = num_buckets_;
|
544 |
+
}
|
545 |
+
|
546 |
+
const hasher& hash_function() const { return *this; }
|
547 |
+
|
548 |
+
static size_type max_size() {
|
549 |
+
return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);
|
550 |
+
}
|
551 |
+
size_type size() const { return num_elements_; }
|
552 |
+
bool empty() const { return size() == 0; }
|
553 |
+
|
554 |
+
iterator find(const Key& k) { return iterator(FindHelper(k).first); }
|
555 |
+
const_iterator find(const Key& k) const { return find(k, NULL); }
|
556 |
+
|
557 |
+
// In traditional C++ style, this performs "insert if not present."
|
558 |
+
std::pair<iterator, bool> insert(const KeyValuePair& kv) {
|
559 |
+
std::pair<const_iterator, size_type> p = FindHelper(kv.key());
|
560 |
+
// Case 1: key was already present.
|
561 |
+
if (p.first.node_ != NULL)
|
562 |
+
return std::make_pair(iterator(p.first), false);
|
563 |
+
// Case 2: insert.
|
564 |
+
if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
|
565 |
+
p = FindHelper(kv.key());
|
566 |
+
}
|
567 |
+
const size_type b = p.second; // bucket number
|
568 |
+
Node* node = Alloc<Node>(1);
|
569 |
+
alloc_.construct(&node->kv, kv);
|
570 |
+
iterator result = InsertUnique(b, node);
|
571 |
+
++num_elements_;
|
572 |
+
return std::make_pair(result, true);
|
573 |
+
}
|
574 |
+
|
575 |
+
// The same, but if an insertion is necessary then the value portion of the
|
576 |
+
// inserted key-value pair is left uninitialized.
|
577 |
+
std::pair<iterator, bool> insert(const Key& k) {
|
578 |
+
std::pair<const_iterator, size_type> p = FindHelper(k);
|
579 |
+
// Case 1: key was already present.
|
580 |
+
if (p.first.node_ != NULL)
|
581 |
+
return std::make_pair(iterator(p.first), false);
|
582 |
+
// Case 2: insert.
|
583 |
+
if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
|
584 |
+
p = FindHelper(k);
|
585 |
+
}
|
586 |
+
const size_type b = p.second; // bucket number
|
587 |
+
Node* node = Alloc<Node>(1);
|
588 |
+
typedef typename Allocator::template rebind<Key>::other KeyAllocator;
|
589 |
+
KeyAllocator(alloc_).construct(&node->kv.key(), k);
|
590 |
+
iterator result = InsertUnique(b, node);
|
591 |
+
++num_elements_;
|
592 |
+
return std::make_pair(result, true);
|
593 |
+
}
|
594 |
+
|
595 |
+
Value& operator[](const Key& k) {
|
596 |
+
KeyValuePair kv(k, Value());
|
597 |
+
return insert(kv).first->value();
|
598 |
+
}
|
599 |
+
|
600 |
+
// Removes the element that `it` points to. `it` must belong to this map
// (checked by DCHECK). Handles both bucket representations: a linked list
// or a shared tree spanning buckets b and b^1. Also repairs
// index_of_first_non_null_ when the first occupied bucket was emptied.
void erase(iterator it) {
  GOOGLE_DCHECK_EQ(it.m_, this);
  typename Tree::iterator tree_it;
  const bool is_list = it.revalidate_if_necessary(&tree_it);
  size_type b = it.bucket_index_;
  Node* const item = it.node_;
  if (is_list) {
    GOOGLE_DCHECK(TableEntryIsNonEmptyList(b));
    Node* head = static_cast<Node*>(table_[b]);
    head = EraseFromLinkedList(item, head);
    table_[b] = static_cast<void*>(head);
  } else {
    GOOGLE_DCHECK(TableEntryIsTree(b));
    Tree* tree = static_cast<Tree*>(table_[b]);
    tree->erase(*tree_it);
    if (tree->empty()) {
      // Force b to be the minimum of b and b ^ 1. This is important
      // only because we want index_of_first_non_null_ to be correct.
      b &= ~static_cast<size_type>(1);
      DestroyTree(tree);
      // A tree always occupies the even/odd bucket pair; clear both slots.
      table_[b] = table_[b + 1] = NULL;
    }
  }
  DestroyNode(item);
  --num_elements_;
  if (GOOGLE_PREDICT_FALSE(b == index_of_first_non_null_)) {
    // We may have emptied the first non-empty bucket; scan forward for the
    // next occupied one so iteration still starts at the right place.
    while (index_of_first_non_null_ < num_buckets_ &&
           table_[index_of_first_non_null_] == NULL) {
      ++index_of_first_non_null_;
    }
  }
}
|
632 |
+
|
633 |
+
private:
|
634 |
+
const_iterator find(const Key& k, TreeIterator* it) const {
|
635 |
+
return FindHelper(k, it).first;
|
636 |
+
}
|
637 |
+
std::pair<const_iterator, size_type> FindHelper(const Key& k) const {
|
638 |
+
return FindHelper(k, NULL);
|
639 |
+
}
|
640 |
+
std::pair<const_iterator, size_type> FindHelper(const Key& k,
|
641 |
+
TreeIterator* it) const {
|
642 |
+
size_type b = BucketNumber(k);
|
643 |
+
if (TableEntryIsNonEmptyList(b)) {
|
644 |
+
Node* node = static_cast<Node*>(table_[b]);
|
645 |
+
do {
|
646 |
+
if (IsMatch(*KeyPtrFromNodePtr(node), k)) {
|
647 |
+
return std::make_pair(const_iterator(node, this, b), b);
|
648 |
+
} else {
|
649 |
+
node = node->next;
|
650 |
+
}
|
651 |
+
} while (node != NULL);
|
652 |
+
} else if (TableEntryIsTree(b)) {
|
653 |
+
GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
|
654 |
+
b &= ~static_cast<size_t>(1);
|
655 |
+
Tree* tree = static_cast<Tree*>(table_[b]);
|
656 |
+
Key* key = const_cast<Key*>(&k);
|
657 |
+
typename Tree::iterator tree_it = tree->find(key);
|
658 |
+
if (tree_it != tree->end()) {
|
659 |
+
if (it != NULL) *it = tree_it;
|
660 |
+
return std::make_pair(const_iterator(tree_it, this, b), b);
|
661 |
+
}
|
662 |
+
}
|
663 |
+
return std::make_pair(end(), b);
|
664 |
+
}
|
665 |
+
|
666 |
+
// Insert the given Node in bucket b. If that would make bucket b too big,
|
667 |
+
// and bucket b is not a tree, create a tree for buckets b and b^1 to share.
|
668 |
+
// Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct
|
669 |
+
// bucket. num_elements_ is not modified.
|
670 |
+
iterator InsertUnique(size_type b, Node* node) {
|
671 |
+
GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ ||
|
672 |
+
table_[index_of_first_non_null_] != NULL);
|
673 |
+
// In practice, the code that led to this point may have already
|
674 |
+
// determined whether we are inserting into an empty list, a short list,
|
675 |
+
// or whatever. But it's probably cheap enough to recompute that here;
|
676 |
+
// it's likely that we're inserting into an empty or short list.
|
677 |
+
iterator result;
|
678 |
+
GOOGLE_DCHECK(find(*KeyPtrFromNodePtr(node)) == end());
|
679 |
+
if (TableEntryIsEmpty(b)) {
|
680 |
+
result = InsertUniqueInList(b, node);
|
681 |
+
} else if (TableEntryIsNonEmptyList(b)) {
|
682 |
+
if (GOOGLE_PREDICT_FALSE(TableEntryIsTooLong(b))) {
|
683 |
+
TreeConvert(b);
|
684 |
+
result = InsertUniqueInTree(b, node);
|
685 |
+
GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast<size_type>(1));
|
686 |
+
} else {
|
687 |
+
// Insert into a pre-existing list. This case cannot modify
|
688 |
+
// index_of_first_non_null_, so we skip the code to update it.
|
689 |
+
return InsertUniqueInList(b, node);
|
690 |
+
}
|
691 |
+
} else {
|
692 |
+
// Insert into a pre-existing tree. This case cannot modify
|
693 |
+
// index_of_first_non_null_, so we skip the code to update it.
|
694 |
+
return InsertUniqueInTree(b, node);
|
695 |
+
}
|
696 |
+
// parentheses around (std::min) prevents macro expansion of min(...)
|
697 |
+
index_of_first_non_null_ =
|
698 |
+
(std::min)(index_of_first_non_null_, result.bucket_index_);
|
699 |
+
return result;
|
700 |
+
}
|
701 |
+
|
702 |
+
// Helper for InsertUnique. Handles the case where bucket b is a
|
703 |
+
// not-too-long linked list.
|
704 |
+
iterator InsertUniqueInList(size_type b, Node* node) {
|
705 |
+
node->next = static_cast<Node*>(table_[b]);
|
706 |
+
table_[b] = static_cast<void*>(node);
|
707 |
+
return iterator(node, this, b);
|
708 |
+
}
|
709 |
+
|
710 |
+
// Helper for InsertUnique. Handles the case where bucket b points to a
|
711 |
+
// Tree.
|
712 |
+
iterator InsertUniqueInTree(size_type b, Node* node) {
|
713 |
+
GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
|
714 |
+
// Maintain the invariant that node->next is NULL for all Nodes in Trees.
|
715 |
+
node->next = NULL;
|
716 |
+
return iterator(static_cast<Tree*>(table_[b])
|
717 |
+
->insert(KeyPtrFromNodePtr(node))
|
718 |
+
.first,
|
719 |
+
this, b & ~static_cast<size_t>(1));
|
720 |
+
}
|
721 |
+
|
722 |
+
// Returns whether it did resize. Currently this is only used when
|
723 |
+
// num_elements_ increases, though it could be used in other situations.
|
724 |
+
// It checks for load too low as well as load too high: because any number
|
725 |
+
// of erases can occur between inserts, the load could be as low as 0 here.
|
726 |
+
// Resizing to a lower size is not always helpful, but failing to do so can
|
727 |
+
// destroy the expected big-O bounds for some operations. By having the
|
728 |
+
// policy that sometimes we resize down as well as up, clients can easily
|
729 |
+
// keep O(size()) = O(number of buckets) if they want that.
|
730 |
+
bool ResizeIfLoadIsOutOfRange(size_type new_size) {
|
731 |
+
const size_type kMaxMapLoadTimes16 = 12; // controls RAM vs CPU tradeoff
|
732 |
+
const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;
|
733 |
+
const size_type lo_cutoff = hi_cutoff / 4;
|
734 |
+
// We don't care how many elements are in trees. If a lot are,
|
735 |
+
// we may resize even though there are many empty buckets. In
|
736 |
+
// practice, this seems fine.
|
737 |
+
if (GOOGLE_PREDICT_FALSE(new_size >= hi_cutoff)) {
|
738 |
+
if (num_buckets_ <= max_size() / 2) {
|
739 |
+
Resize(num_buckets_ * 2);
|
740 |
+
return true;
|
741 |
+
}
|
742 |
+
} else if (GOOGLE_PREDICT_FALSE(new_size <= lo_cutoff &&
|
743 |
+
num_buckets_ > kMinTableSize)) {
|
744 |
+
size_type lg2_of_size_reduction_factor = 1;
|
745 |
+
// It's possible we want to shrink a lot here... size() could even be 0.
|
746 |
+
// So, estimate how much to shrink by making sure we don't shrink so
|
747 |
+
// much that we would need to grow the table after a few inserts.
|
748 |
+
const size_type hypothetical_size = new_size * 5 / 4 + 1;
|
749 |
+
while ((hypothetical_size << lg2_of_size_reduction_factor) <
|
750 |
+
hi_cutoff) {
|
751 |
+
++lg2_of_size_reduction_factor;
|
752 |
+
}
|
753 |
+
size_type new_num_buckets = std::max<size_type>(
|
754 |
+
kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
|
755 |
+
if (new_num_buckets != num_buckets_) {
|
756 |
+
Resize(new_num_buckets);
|
757 |
+
return true;
|
758 |
+
}
|
759 |
+
}
|
760 |
+
return false;
|
761 |
+
}
|
762 |
+
|
763 |
+
// Resize to the given number of buckets.
|
764 |
+
void Resize(size_t new_num_buckets) {
|
765 |
+
GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);
|
766 |
+
void** const old_table = table_;
|
767 |
+
const size_type old_table_size = num_buckets_;
|
768 |
+
num_buckets_ = new_num_buckets;
|
769 |
+
table_ = CreateEmptyTable(num_buckets_);
|
770 |
+
const size_type start = index_of_first_non_null_;
|
771 |
+
index_of_first_non_null_ = num_buckets_;
|
772 |
+
for (size_type i = start; i < old_table_size; i++) {
|
773 |
+
if (TableEntryIsNonEmptyList(old_table, i)) {
|
774 |
+
TransferList(old_table, i);
|
775 |
+
} else if (TableEntryIsTree(old_table, i)) {
|
776 |
+
TransferTree(old_table, i++);
|
777 |
+
}
|
778 |
+
}
|
779 |
+
Dealloc<void*>(old_table, old_table_size);
|
780 |
+
}
|
781 |
+
|
782 |
+
void TransferList(void* const* table, size_type index) {
|
783 |
+
Node* node = static_cast<Node*>(table[index]);
|
784 |
+
do {
|
785 |
+
Node* next = node->next;
|
786 |
+
InsertUnique(BucketNumber(*KeyPtrFromNodePtr(node)), node);
|
787 |
+
node = next;
|
788 |
+
} while (node != NULL);
|
789 |
+
}
|
790 |
+
|
791 |
+
void TransferTree(void* const* table, size_type index) {
|
792 |
+
Tree* tree = static_cast<Tree*>(table[index]);
|
793 |
+
typename Tree::iterator tree_it = tree->begin();
|
794 |
+
do {
|
795 |
+
Node* node = NodePtrFromKeyPtr(*tree_it);
|
796 |
+
InsertUnique(BucketNumber(**tree_it), node);
|
797 |
+
} while (++tree_it != tree->end());
|
798 |
+
DestroyTree(tree);
|
799 |
+
}
|
800 |
+
|
801 |
+
Node* EraseFromLinkedList(Node* item, Node* head) {
|
802 |
+
if (head == item) {
|
803 |
+
return head->next;
|
804 |
+
} else {
|
805 |
+
head->next = EraseFromLinkedList(item, head->next);
|
806 |
+
return head;
|
807 |
+
}
|
808 |
+
}
|
809 |
+
|
810 |
+
bool TableEntryIsEmpty(size_type b) const {
|
811 |
+
return TableEntryIsEmpty(table_, b);
|
812 |
+
}
|
813 |
+
bool TableEntryIsNonEmptyList(size_type b) const {
|
814 |
+
return TableEntryIsNonEmptyList(table_, b);
|
815 |
+
}
|
816 |
+
bool TableEntryIsTree(size_type b) const {
|
817 |
+
return TableEntryIsTree(table_, b);
|
818 |
+
}
|
819 |
+
bool TableEntryIsList(size_type b) const {
|
820 |
+
return TableEntryIsList(table_, b);
|
821 |
+
}
|
822 |
+
static bool TableEntryIsEmpty(void* const* table, size_type b) {
|
823 |
+
return table[b] == NULL;
|
824 |
+
}
|
825 |
+
static bool TableEntryIsNonEmptyList(void* const* table, size_type b) {
|
826 |
+
return table[b] != NULL && table[b] != table[b ^ 1];
|
827 |
+
}
|
828 |
+
static bool TableEntryIsTree(void* const* table, size_type b) {
|
829 |
+
return !TableEntryIsEmpty(table, b) &&
|
830 |
+
!TableEntryIsNonEmptyList(table, b);
|
831 |
+
}
|
832 |
+
static bool TableEntryIsList(void* const* table, size_type b) {
|
833 |
+
return !TableEntryIsTree(table, b);
|
834 |
+
}
|
835 |
+
|
836 |
+
void TreeConvert(size_type b) {
|
837 |
+
GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));
|
838 |
+
typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);
|
839 |
+
Tree* tree = tree_allocator.allocate(1);
|
840 |
+
// We want to use the three-arg form of construct, if it exists, but we
|
841 |
+
// create a temporary and use the two-arg construct that's known to exist.
|
842 |
+
// It's clunky, but the compiler should be able to generate more-or-less
|
843 |
+
// the same code.
|
844 |
+
tree_allocator.construct(tree,
|
845 |
+
Tree(KeyCompare(), KeyPtrAllocator(alloc_)));
|
846 |
+
// Now the tree is ready to use.
|
847 |
+
size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);
|
848 |
+
GOOGLE_DCHECK_EQ(count, tree->size());
|
849 |
+
table_[b] = table_[b ^ 1] = static_cast<void*>(tree);
|
850 |
+
}
|
851 |
+
|
852 |
+
// Copy a linked list in the given bucket to a tree.
|
853 |
+
// Returns the number of things it copied.
|
854 |
+
size_type CopyListToTree(size_type b, Tree* tree) {
|
855 |
+
size_type count = 0;
|
856 |
+
Node* node = static_cast<Node*>(table_[b]);
|
857 |
+
while (node != NULL) {
|
858 |
+
tree->insert(KeyPtrFromNodePtr(node));
|
859 |
+
++count;
|
860 |
+
Node* next = node->next;
|
861 |
+
node->next = NULL;
|
862 |
+
node = next;
|
863 |
+
}
|
864 |
+
return count;
|
865 |
+
}
|
866 |
+
|
867 |
+
// Return whether table_[b] is a linked list that seems awfully long.
|
868 |
+
// Requires table_[b] to point to a non-empty linked list.
|
869 |
+
bool TableEntryIsTooLong(size_type b) {
|
870 |
+
const size_type kMaxLength = 8;
|
871 |
+
size_type count = 0;
|
872 |
+
Node* node = static_cast<Node*>(table_[b]);
|
873 |
+
do {
|
874 |
+
++count;
|
875 |
+
node = node->next;
|
876 |
+
} while (node != NULL);
|
877 |
+
// Invariant: no linked list ever is more than kMaxLength in length.
|
878 |
+
GOOGLE_DCHECK_LE(count, kMaxLength);
|
879 |
+
return count >= kMaxLength;
|
880 |
+
}
|
881 |
+
|
882 |
+
size_type BucketNumber(const Key& k) const {
|
883 |
+
// We inherit from hasher, so one-arg operator() provides a hash function.
|
884 |
+
size_type h = (*const_cast<InnerMap*>(this))(k);
|
885 |
+
return (h + seed_) & (num_buckets_ - 1);
|
886 |
+
}
|
887 |
+
|
888 |
+
bool IsMatch(const Key& k0, const Key& k1) const {
|
889 |
+
return std::equal_to<Key>()(k0, k1);
|
890 |
+
}
|
891 |
+
|
892 |
+
// Return a power of two no less than max(kMinTableSize, n).
|
893 |
+
// Assumes either n < kMinTableSize or n is a power of two.
|
894 |
+
size_type TableSize(size_type n) {
|
895 |
+
return n < static_cast<size_type>(kMinTableSize)
|
896 |
+
? static_cast<size_type>(kMinTableSize)
|
897 |
+
: n;
|
898 |
+
}
|
899 |
+
|
900 |
+
// Use alloc_ to allocate an array of n objects of type U.
|
901 |
+
template <typename U>
|
902 |
+
U* Alloc(size_type n) {
|
903 |
+
typedef typename Allocator::template rebind<U>::other alloc_type;
|
904 |
+
return alloc_type(alloc_).allocate(n);
|
905 |
+
}
|
906 |
+
|
907 |
+
// Use alloc_ to deallocate an array of n objects of type U.
|
908 |
+
template <typename U>
|
909 |
+
void Dealloc(U* t, size_type n) {
|
910 |
+
typedef typename Allocator::template rebind<U>::other alloc_type;
|
911 |
+
alloc_type(alloc_).deallocate(t, n);
|
912 |
+
}
|
913 |
+
|
914 |
+
void DestroyNode(Node* node) {
|
915 |
+
alloc_.destroy(&node->kv);
|
916 |
+
Dealloc<Node>(node, 1);
|
917 |
+
}
|
918 |
+
|
919 |
+
void DestroyTree(Tree* tree) {
|
920 |
+
typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);
|
921 |
+
tree_allocator.destroy(tree);
|
922 |
+
tree_allocator.deallocate(tree, 1);
|
923 |
+
}
|
924 |
+
|
925 |
+
void** CreateEmptyTable(size_type n) {
|
926 |
+
GOOGLE_DCHECK(n >= kMinTableSize);
|
927 |
+
GOOGLE_DCHECK_EQ(n & (n - 1), 0);
|
928 |
+
void** result = Alloc<void*>(n);
|
929 |
+
memset(result, 0, n * sizeof(result[0]));
|
930 |
+
return result;
|
931 |
+
}
|
932 |
+
|
933 |
+
// Return a randomish value.
|
934 |
+
size_type Seed() const {
|
935 |
+
size_type s = static_cast<size_type>(reinterpret_cast<uintptr_t>(this));
|
936 |
+
#if defined(__x86_64__) && defined(__GNUC__)
|
937 |
+
uint32 hi, lo;
|
938 |
+
asm("rdtsc" : "=a" (lo), "=d" (hi));
|
939 |
+
s += ((static_cast<uint64>(hi) << 32) | lo);
|
940 |
+
#endif
|
941 |
+
return s;
|
942 |
+
}
|
943 |
+
|
944 |
+
size_type num_elements_;             // number of stored key/value pairs
size_type num_buckets_;              // size of table_; always a power of two
size_type seed_;                     // per-instance hash perturbation
size_type index_of_first_non_null_;  // first occupied bucket (== num_buckets_ if none)
void** table_;  // an array with num_buckets_ entries
Allocator alloc_;
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);
};  // end of class InnerMap
|
952 |
+
|
953 |
+
public:
|
954 |
+
// Iterators
|
955 |
+
// Forward iterator over the map's key/value pairs (read-only view).
// Thin adapter over InnerMap::const_iterator: the inner iterator yields a
// value_type*, which this class dereferences an extra level.
class const_iterator {
  typedef typename InnerMap::const_iterator InnerIt;

 public:
  typedef std::forward_iterator_tag iterator_category;
  typedef typename Map::value_type value_type;
  typedef ptrdiff_t difference_type;
  typedef const value_type* pointer;
  typedef const value_type& reference;

  const_iterator() {}
  explicit const_iterator(const InnerIt& it) : it_(it) {}

  const_reference operator*() const {
    // it_->value() is a value_type*; dereference it to expose the pair.
    return *it_->value();
  }
  const_pointer operator->() const { return &(operator*()); }

  const_iterator& operator++() {
    ++it_;
    return *this;
  }
  const_iterator operator++(int) { return const_iterator(it_++); }

  friend bool operator==(const const_iterator& a, const const_iterator& b) {
    return a.it_ == b.it_;
  }
  friend bool operator!=(const const_iterator& a, const const_iterator& b) {
    return !(a == b);
  }

 private:
  InnerIt it_;
};
|
989 |
+
|
990 |
+
// Mutable forward iterator over the map's key/value pairs.
// Mirrors const_iterator and converts to it implicitly.
class iterator {
  typedef typename InnerMap::iterator InnerIt;

 public:
  typedef std::forward_iterator_tag iterator_category;
  typedef typename Map::value_type value_type;
  typedef ptrdiff_t difference_type;
  typedef value_type* pointer;
  typedef value_type& reference;

  iterator() {}
  explicit iterator(const InnerIt& it) : it_(it) {}

  // it_->value() is a value_type*; dereference once more to get the pair.
  reference operator*() const { return *it_->value(); }
  pointer operator->() const { return &(operator*()); }

  iterator& operator++() {
    ++it_;
    return *this;
  }
  iterator operator++(int) { return iterator(it_++); }

  // Allow implicit conversion to const_iterator.
  operator const_iterator() const {
    return const_iterator(typename InnerMap::const_iterator(it_));
  }

  friend bool operator==(const iterator& a, const iterator& b) {
    return a.it_ == b.it_;
  }
  friend bool operator!=(const iterator& a, const iterator& b) {
    return !(a == b);
  }

 private:
  friend class Map;  // Map::erase needs access to it_

  InnerIt it_;
};
|
1029 |
+
|
1030 |
+
// Iteration entry points; all delegate to the underlying InnerMap.
iterator begin() { return iterator(elements_->begin()); }
iterator end() { return iterator(elements_->end()); }
const_iterator begin() const {
  return const_iterator(iterator(elements_->begin()));
}
const_iterator end() const {
  return const_iterator(iterator(elements_->end()));
}
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }

// Capacity
size_type size() const { return elements_->size(); }
bool empty() const { return size() == 0; }
|
1044 |
+
|
1045 |
+
// Element access
|
1046 |
+
// Subscript access: default-inserts on a missing key. For enum-valued
// proto2 maps the freshly inserted value is set to default_enum_value_.
T& operator[](const key_type& key) {
  value_type** value = &(*elements_)[key];
  if (*value == NULL) {
    // Key was absent: allocate the pair (heap or arena) and initialize it.
    *value = CreateValueTypeInternal(key);
    internal::MapValueInitializer<google::protobuf::is_proto_enum<T>::value,
                                  T>::Initialize((*value)->second,
                                                 default_enum_value_);
  }
  return (*value)->second;
}
// Checked access: aborts (CHECK) if the key is absent.
const T& at(const key_type& key) const {
  const_iterator it = find(key);
  GOOGLE_CHECK(it != end()) << "key not found: " << key;
  return it->second;
}
T& at(const key_type& key) {
  iterator it = find(key);
  GOOGLE_CHECK(it != end()) << "key not found: " << key;
  return it->second;
}
|
1066 |
+
|
1067 |
+
// Lookup
|
1068 |
+
// Keys are unique, so count is always 0 or 1.
size_type count(const key_type& key) const {
  const_iterator it = find(key);
  GOOGLE_DCHECK(it == end() || key == it->first);
  return it == end() ? 0 : 1;
}
const_iterator find(const key_type& key) const {
  return const_iterator(iterator(elements_->find(key)));
}
iterator find(const key_type& key) { return iterator(elements_->find(key)); }
// equal_range for a unique-key map: an empty range on a miss, a
// single-element range [it, ++it) on a hit.
std::pair<const_iterator, const_iterator> equal_range(
    const key_type& key) const {
  const_iterator it = find(key);
  if (it == end()) {
    return std::pair<const_iterator, const_iterator>(it, it);
  } else {
    const_iterator begin = it++;
    return std::pair<const_iterator, const_iterator>(begin, it);
  }
}
std::pair<iterator, iterator> equal_range(const key_type& key) {
  iterator it = find(key);
  if (it == end()) {
    return std::pair<iterator, iterator>(it, it);
  } else {
    iterator begin = it++;
    return std::pair<iterator, iterator>(begin, it);
  }
}
|
1096 |
+
|
1097 |
+
// insert
|
1098 |
+
// insert
// Inserts a copy of `value` unless the key is already present.
// Returns {iterator, whether an insertion happened}.
std::pair<iterator, bool> insert(const value_type& value) {
  // InnerMap::insert(key) reserves the slot; we fill in the payload only
  // when the key was actually new.
  std::pair<typename InnerMap::iterator, bool> p =
      elements_->insert(value.first);
  if (p.second) {
    p.first->value() = CreateValueTypeInternal(value);
  }
  return std::pair<iterator, bool>(iterator(p.first), p.second);
}
// Range insert: existing keys are left untouched (no overwrite).
template <class InputIt>
void insert(InputIt first, InputIt last) {
  for (InputIt it = first; it != last; ++it) {
    iterator exist_it = find(it->first);
    if (exist_it == end()) {
      operator[](it->first) = it->second;
    }
  }
}
void insert(std::initializer_list<value_type> values) {
  insert(values.begin(), values.end());
}
|
1118 |
+
|
1119 |
+
// Erase and clear
|
1120 |
+
// Erase and clear
// Returns the number of elements removed (0 or 1 for a unique-key map).
size_type erase(const key_type& key) {
  iterator it = find(key);
  if (it == end()) {
    return 0;
  } else {
    erase(it);
    return 1;
  }
}
// Removes the element at pos and returns the iterator following it.
iterator erase(iterator pos) {
  // Heap-allocated pairs are owned by the map; arena-allocated ones are
  // freed with the arena, so only delete when there is no arena.
  if (arena_ == NULL) delete pos.operator->();
  iterator i = pos++;  // advance before the node is destroyed
  elements_->erase(i.it_);
  return pos;
}
void erase(iterator first, iterator last) {
  while (first != last) {
    first = erase(first);
  }
}
void clear() { erase(begin(), end()); }
|
1141 |
+
|
1142 |
+
// Assign
|
1143 |
+
// Assign
// Copy assignment: clears this map and copies the other's contents.
Map& operator=(const Map& other) {
  if (this != &other) {
    clear();
    insert(other.begin(), other.end());
  }
  return *this;
}

// Swap contents with another map. O(1) pointer swap when both maps share
// an arena (or both have none); otherwise falls back to copying.
void swap(Map& other) {
  if (arena_ == other.arena_) {
    std::swap(default_enum_value_, other.default_enum_value_);
    std::swap(elements_, other.elements_);
  } else {
    // TODO(zuguang): optimize this. The temporary copy can be allocated
    // in the same arena as the other message, and the "other = copy" can
    // be replaced with the fast-path swap above.
    Map copy = *this;
    *this = other;
    other = copy;
  }
}

// Access to hasher. Currently this returns a copy, but it may
// be modified to return a const reference in the future.
hasher hash_function() const { return elements_->hash_function(); }
|
1168 |
+
|
1169 |
+
private:
|
1170 |
+
// Set default enum value only for proto2 map field whose value is enum type.
|
1171 |
+
// Set default enum value only for proto2 map field whose value is enum type.
void SetDefaultEnumValue(int default_enum_value) {
  default_enum_value_ = default_enum_value;
}

// Allocates a value_type with the given key and a default-constructed
// value, on the heap or in the arena as appropriate.
value_type* CreateValueTypeInternal(const Key& key) {
  if (arena_ == NULL) {
    return new value_type(key);
  } else {
    // Arena path: raw storage, then in-place construction of each member.
    // The const_casts are needed because value_type's first is const.
    value_type* value = reinterpret_cast<value_type*>(
        Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
    Arena::CreateInArenaStorage(const_cast<Key*>(&value->first), arena_);
    Arena::CreateInArenaStorage(&value->second, arena_);
    const_cast<Key&>(value->first) = key;
    return value;
  }
}

// Allocates a value_type copied from `value` (both key and mapped value),
// on the heap or in the arena as appropriate.
value_type* CreateValueTypeInternal(const value_type& value) {
  if (arena_ == NULL) {
    return new value_type(value);
  } else {
    value_type* p = reinterpret_cast<value_type*>(
        Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
    Arena::CreateInArenaStorage(const_cast<Key*>(&p->first), arena_);
    Arena::CreateInArenaStorage(&p->second, arena_);
    const_cast<Key&>(p->first) = value.first;
    p->second = value.second;
    return p;
  }
}
|
1201 |
+
|
1202 |
+
Arena* arena_;           // NULL for heap allocation; owns nothing itself
int default_enum_value_; // initial value for enum-typed mapped values
InnerMap* elements_;     // the underlying hash table
|
1205 |
+
|
1206 |
+
friend class ::google::protobuf::Arena;
|
1207 |
+
typedef void InternalArenaConstructable_;
|
1208 |
+
typedef void DestructorSkippable_;
|
1209 |
+
template <typename Derived, typename K, typename V,
|
1210 |
+
internal::WireFormatLite::FieldType key_wire_type,
|
1211 |
+
internal::WireFormatLite::FieldType value_wire_type,
|
1212 |
+
int default_enum_value>
|
1213 |
+
friend class internal::MapFieldLite;
|
1214 |
+
};
|
1215 |
+
|
1216 |
+
} // namespace protobuf
|
1217 |
+
|
1218 |
+
} // namespace google
|
1219 |
+
#endif // GOOGLE_PROTOBUF_MAP_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_entry_lite.h
ADDED
@@ -0,0 +1,671 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
|
32 |
+
#define GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
|
33 |
+
|
34 |
+
#include <assert.h>
|
35 |
+
|
36 |
+
#include <google/protobuf/stubs/casts.h>
|
37 |
+
#include <google/protobuf/arena.h>
|
38 |
+
#include <google/protobuf/arenastring.h>
|
39 |
+
#include <google/protobuf/map.h>
|
40 |
+
#include <google/protobuf/map_type_handler.h>
|
41 |
+
#include <google/protobuf/stubs/port.h>
|
42 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
43 |
+
|
44 |
+
namespace google {
|
45 |
+
namespace protobuf {
|
46 |
+
namespace internal {
|
47 |
+
template <typename Derived, typename Key, typename Value,
|
48 |
+
WireFormatLite::FieldType kKeyFieldType,
|
49 |
+
WireFormatLite::FieldType kValueFieldType, int default_enum_value>
|
50 |
+
class MapEntry;
|
51 |
+
template <typename Derived, typename Key, typename Value,
|
52 |
+
WireFormatLite::FieldType kKeyFieldType,
|
53 |
+
WireFormatLite::FieldType kValueFieldType, int default_enum_value>
|
54 |
+
class MapFieldLite;
|
55 |
+
} // namespace internal
|
56 |
+
} // namespace protobuf
|
57 |
+
|
58 |
+
namespace protobuf {
|
59 |
+
namespace internal {
|
60 |
+
|
61 |
+
// MoveHelper::Move transfers *src into *dest as cheaply as the type allows:
// a plain copy for primitives and enums, Swap for messages, and a move (or
// swap, pre-C++11) for string-like types. Afterwards *src is safe to
// destroy but must not otherwise be used.
template <bool is_enum, bool is_message, bool is_stringlike, typename T>
struct MoveHelper {  // primitives: a copy is the cheapest transfer
  static void Move(T* src, T* dest) { *dest = *src; }
};

template <bool is_message, bool is_stringlike, typename T>
struct MoveHelper<true, is_message, is_stringlike, T> {  // enums
  static void Move(T* src, T* dest) { *dest = *src; }
  // T is an enum here, so conversions to and from int are permitted.
  static void Move(T* src, int* dest) { *dest = static_cast<int>(*src); }
  static void Move(int* src, T* dest) { *dest = static_cast<T>(*src); }
};

template <bool is_stringlike, typename T>
struct MoveHelper<false, true, is_stringlike, T> {  // messages
  static void Move(T* src, T* dest) { dest->Swap(src); }
};

template <typename T>
struct MoveHelper<false, false, true, T> {  // strings and similar
  static void Move(T* src, T* dest) {
#if __cplusplus >= 201103L
    *dest = std::move(*src);
#else
    dest->swap(*src);
#endif
  }
};
|
92 |
+
|
93 |
+
// MapEntryImpl is used to implement parsing and serialization of map entries.
// It uses the Curiously Recurring Template Pattern (CRTP) to provide the type
// of the eventual generated code to the template code.
//
// Storage notes (visible below): key_/value_ use the handler's TypeOnMemory
// representation, presence is tracked in _has_bits_[0] (bit 0 = key,
// bit 1 = value), and entries created on an arena are not individually
// deleted — the arena reclaims their memory.
template <typename Derived, typename Base, typename Key, typename Value,
          WireFormatLite::FieldType kKeyFieldType,
          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
class MapEntryImpl : public Base {
 protected:
  // Provide utilities to parse/serialize key/value. Provide utilities to
  // manipulate internal stored type.
  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;
  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;

  // Define internal memory layout. Strings and messages are stored as
  // pointers, while other types are stored as values.
  typedef typename KeyTypeHandler::TypeOnMemory KeyOnMemory;
  typedef typename ValueTypeHandler::TypeOnMemory ValueOnMemory;

  // Enum type cannot be used for MapTypeHandler::Read. Define a type
  // which will replace Enum with int.
  typedef typename KeyTypeHandler::MapEntryAccessorType KeyMapEntryAccessorType;
  typedef typename ValueTypeHandler::MapEntryAccessorType
      ValueMapEntryAccessorType;

  // Constants for field number.
  static const int kKeyFieldNumber = 1;
  static const int kValueFieldNumber = 2;

  // Constants for field tag. Both tags fit in a single byte (see kTagSize).
  static const uint8 kKeyTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(
      kKeyFieldNumber, KeyTypeHandler::kWireType);
  static const uint8 kValueTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(
      kValueFieldNumber, ValueTypeHandler::kWireType);
  static const size_t kTagSize = 1;

 public:
  // Work-around for a compiler bug (see repeated_field.h).
  typedef void MapEntryHasMergeTypeTrait;
  typedef Derived EntryType;
  typedef Key EntryKeyType;
  typedef Value EntryValueType;
  static const WireFormatLite::FieldType kEntryKeyFieldType = kKeyFieldType;
  static const WireFormatLite::FieldType kEntryValueFieldType = kValueFieldType;
  static const int kEntryDefaultEnumValue = default_enum_value;

  // Heap-allocated entry: fields initialized with no arena, no bits set.
  MapEntryImpl() : arena_(NULL) {
    KeyTypeHandler::Initialize(&key_, NULL);
    ValueTypeHandler::InitializeMaybeByDefaultEnum(&value_, default_enum_value,
                                                   NULL);
    _has_bits_[0] = 0;
  }

  // Arena-allocated entry: fields are initialized on (and owned by) arena.
  explicit MapEntryImpl(Arena* arena) : arena_(arena) {
    KeyTypeHandler::Initialize(&key_, arena);
    ValueTypeHandler::InitializeMaybeByDefaultEnum(&value_, default_enum_value,
                                                   arena);
    _has_bits_[0] = 0;
  }

  ~MapEntryImpl() {
    // Arena-owned entries skip per-field deletion; the arena frees them.
    if (GetArenaNoVirtual() != NULL) return;
    KeyTypeHandler::DeleteNoArena(key_);
    ValueTypeHandler::DeleteNoArena(value_);
  }

  // accessors ======================================================

  virtual inline const KeyMapEntryAccessorType& key() const {
    return KeyTypeHandler::GetExternalReference(key_);
  }
  // Falls back to the default instance's value when value_ is uninitialized.
  virtual inline const ValueMapEntryAccessorType& value() const {
    return ValueTypeHandler::DefaultIfNotInitialized(
        value_, Derived::internal_default_instance()->value_);
  }
  // Mutable accessors mark the field as present before handing it out.
  inline KeyMapEntryAccessorType* mutable_key() {
    set_has_key();
    return KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual());
  }
  inline ValueMapEntryAccessorType* mutable_value() {
    set_has_value();
    return ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual());
  }

  // implements MessageLite =========================================

  // MapEntryImpl is for implementation only and this function isn't called
  // anywhere. Just provide a fake implementation here for MessageLite.
  string GetTypeName() const { return ""; }

  void CheckTypeAndMergeFrom(const MessageLite& other) {
    MergeFromInternal(*::google::protobuf::down_cast<const Derived*>(&other));
  }

  bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) {
    uint32 tag;

    for (;;) {
      // 1) corrupted data: return false;
      // 2) unknown field: skip without putting into unknown field set;
      // 3) unknown enum value: keep it in parsing. In proto2, caller should
      // check the value and put this entry into containing message's unknown
      // field set if the value is an unknown enum. In proto3, caller doesn't
      // need to care whether the value is unknown enum;
      // 4) missing key/value: missed key/value will have default value. caller
      // should take this entry as if key/value is set to default value.
      tag = input->ReadTagNoLastTag();
      switch (tag) {
        case kKeyTag:
          if (!KeyTypeHandler::Read(input, mutable_key())) {
            return false;
          }
          set_has_key();
          break;

        case kValueTag:
          if (!ValueTypeHandler::Read(input, mutable_value())) {
            return false;
          }
          set_has_value();
          // Common case: the value is the last field of the entry.
          if (input->ExpectAtEnd()) return true;
          break;

        default:
          if (tag == 0 ||
              WireFormatLite::GetTagWireType(tag) ==
              WireFormatLite::WIRETYPE_END_GROUP) {
            return true;
          }
          if (!WireFormatLite::SkipField(input, tag)) return false;
          break;
      }
    }
  }

  // Size on the wire: one tag byte plus the payload size for each present
  // field; absent fields contribute nothing.
  size_t ByteSizeLong() const {
    size_t size = 0;
    size += has_key() ?
        kTagSize + static_cast<size_t>(KeyTypeHandler::ByteSize(key())) : 0;
    size += has_value() ?
        kTagSize + static_cast<size_t>(ValueTypeHandler::ByteSize(value())) : 0;
    return size;
  }

  void SerializeWithCachedSizes(::google::protobuf::io::CodedOutputStream* output) const {
    KeyTypeHandler::Write(kKeyFieldNumber, key(), output);
    ValueTypeHandler::Write(kValueFieldNumber, value(), output);
  }

  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(bool deterministic,
                                                   ::google::protobuf::uint8* output) const {
    output = KeyTypeHandler::InternalWriteToArray(kKeyFieldNumber, key(),
                                                  deterministic, output);
    output = ValueTypeHandler::InternalWriteToArray(kValueFieldNumber, value(),
                                                    deterministic, output);
    return output;
  }

  // Don't override SerializeWithCachedSizesToArray. Use MessageLite's.

  int GetCachedSize() const {
    int size = 0;
    size += has_key()
        ? static_cast<int>(kTagSize) + KeyTypeHandler::GetCachedSize(key())
        : 0;
    size += has_value()
        ? static_cast<int>(kTagSize) + ValueTypeHandler::GetCachedSize(value())
        : 0;
    return size;
  }

  // Only the value can carry required sub-fields; the key is always scalar
  // or string, so initialization is decided by the value handler alone.
  bool IsInitialized() const { return ValueTypeHandler::IsInitialized(value_); }

  Base* New() const {
    Derived* entry = new Derived;
    return entry;
  }

  Base* New(Arena* arena) const {
    Derived* entry = Arena::CreateMessage<Derived>(arena);
    return entry;
  }

  size_t SpaceUsedLong() const {
    size_t size = sizeof(Derived);
    size += KeyTypeHandler::SpaceUsedInMapEntryLong(key_);
    size += ValueTypeHandler::SpaceUsedInMapEntryLong(value_);
    return size;
  }

 protected:
  // We can't declare this function directly here as it would hide the other
  // overload (const Message&).
  void MergeFromInternal(const MapEntryImpl& from) {
    if (from._has_bits_[0]) {
      if (from.has_key()) {
        KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual());
        KeyTypeHandler::Merge(from.key(), &key_, GetArenaNoVirtual());
        set_has_key();
      }
      if (from.has_value()) {
        ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual());
        ValueTypeHandler::Merge(from.value(), &value_, GetArenaNoVirtual());
        set_has_value();
      }
    }
  }

 public:
  void Clear() {
    KeyTypeHandler::Clear(&key_, GetArenaNoVirtual());
    ValueTypeHandler::ClearMaybeByDefaultEnum(
        &value_, GetArenaNoVirtual(), default_enum_value);
    clear_has_key();
    clear_has_value();
  }

  // Point the default instance's key_/value_ at the handlers' default values.
  static void InitAsDefaultInstance() {
    Derived* d = const_cast<Derived*>(Derived::internal_default_instance());
    KeyTypeHandler::AssignDefaultValue(&d->key_);
    ValueTypeHandler::AssignDefaultValue(&d->value_);
  }

  Arena* GetArena() const {
    return GetArenaNoVirtual();
  }

  // Create a MapEntryImpl for given key and value from google::protobuf::Map in
  // serialization. This function is only called when value is enum. Enum is
  // treated differently because its type in MapEntry is int and its type in
  // google::protobuf::Map is enum. We cannot create a reference to int from an enum.
  static Derived* EnumWrap(const Key& key, const Value value, Arena* arena) {
    return Arena::CreateMessage<MapEnumEntryWrapper>(arena, key, value);
  }

  // Like above, but for all the other types. This avoids value copy to create
  // MapEntryImpl from google::protobuf::Map in serialization.
  static Derived* Wrap(const Key& key, const Value& value, Arena* arena) {
    return Arena::CreateMessage<MapEntryWrapper>(arena, key, value);
  }

  // Parsing using MergePartialFromCodedStream, above, is not as
  // efficient as it could be. This helper class provides a speedier way.
  template <typename MapField, typename Map>
  class Parser {
   public:
    explicit Parser(MapField* mf) : mf_(mf), map_(mf->MutableMap()) {}

    // This does what the typical MergePartialFromCodedStream() is expected to
    // do, with the additional side-effect that if successful (i.e., if true is
    // going to be its return value) it inserts the key-value pair into map_.
    bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) {
      // Look for the expected thing: a key and then a value. If it fails,
      // invoke the enclosing class's MergePartialFromCodedStream, or return
      // false if that would be pointless.
      if (input->ExpectTag(kKeyTag)) {
        if (!KeyTypeHandler::Read(input, &key_)) {
          return false;
        }
        // Peek at the next byte to see if it is kValueTag. If not, bail out.
        const void* data;
        int size;
        input->GetDirectBufferPointerInline(&data, &size);
        // We could use memcmp here, but we don't bother. The tag is one byte.
        GOOGLE_COMPILE_ASSERT(kTagSize == 1, tag_size_error);
        if (size > 0 && *reinterpret_cast<const char*>(data) == kValueTag) {
          // Insert the key first; comparing sizes tells us whether the key
          // was already present in the map.
          typename Map::size_type map_size = map_->size();
          value_ptr_ = &(*map_)[key_];
          if (GOOGLE_PREDICT_TRUE(map_size != map_->size())) {
            // We created a new key-value pair. Fill in the value.
            typedef
                typename MapIf<ValueTypeHandler::kIsEnum, int*, Value*>::type T;
            input->Skip(kTagSize);  // Skip kValueTag.
            if (!ValueTypeHandler::Read(input,
                                        reinterpret_cast<T>(value_ptr_))) {
              map_->erase(key_);  // Failure! Undo insertion.
              return false;
            }
            if (input->ExpectAtEnd()) return true;
            return ReadBeyondKeyValuePair(input);
          }
        }
      } else {
        key_ = Key();
      }

      // Slow path: delegate to a real entry message.
      entry_.reset(mf_->NewEntry());
      *entry_->mutable_key() = key_;
      const bool result = entry_->MergePartialFromCodedStream(input);
      if (result) UseKeyAndValueFromEntry();
      if (entry_->GetArena() != NULL) entry_.release();
      return result;
    }

    const Key& key() const { return key_; }
    const Value& value() const { return *value_ptr_; }

   private:
    void UseKeyAndValueFromEntry() GOOGLE_PROTOBUF_ATTRIBUTE_COLD {
      // Update key_ in case we need it later (because key() is called).
      // This is potentially inefficient, especially if the key is
      // expensive to copy (e.g., a long string), but this is a cold
      // path, so it's not a big deal.
      key_ = entry_->key();
      value_ptr_ = &(*map_)[key_];
      MoveHelper<ValueTypeHandler::kIsEnum,
                 ValueTypeHandler::kIsMessage,
                 ValueTypeHandler::kWireType ==
                 WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
                 Value>::Move(entry_->mutable_value(), value_ptr_);
    }

    // After reading a key and value successfully, and inserting that data
    // into map_, we are not at the end of the input. This is unusual, but
    // allowed by the spec.
    bool ReadBeyondKeyValuePair(::google::protobuf::io::CodedInputStream* input)
        GOOGLE_PROTOBUF_ATTRIBUTE_COLD {
      typedef MoveHelper<KeyTypeHandler::kIsEnum,
                         KeyTypeHandler::kIsMessage,
                         KeyTypeHandler::kWireType ==
                         WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
                         Key> KeyMover;
      typedef MoveHelper<ValueTypeHandler::kIsEnum,
                         ValueTypeHandler::kIsMessage,
                         ValueTypeHandler::kWireType ==
                         WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
                         Value> ValueMover;
      // Move the already-parsed data back into a fresh entry and let the
      // entry's own parser consume the remaining fields.
      entry_.reset(mf_->NewEntry());
      ValueMover::Move(value_ptr_, entry_->mutable_value());
      map_->erase(key_);
      KeyMover::Move(&key_, entry_->mutable_key());
      const bool result = entry_->MergePartialFromCodedStream(input);
      if (result) UseKeyAndValueFromEntry();
      if (entry_->GetArena() != NULL) entry_.release();
      return result;
    }

    MapField* const mf_;
    Map* const map_;
    Key key_;
    Value* value_ptr_;
    // On the fast path entry_ is not used. And, when entry_ is used, it's set
    // to mf_->NewEntry(), so in the arena case we must call entry_.release.
    std::unique_ptr<MapEntryImpl> entry_;
  };

 protected:
  // Presence bits: bit 0 tracks the key, bit 1 tracks the value.
  void set_has_key() { _has_bits_[0] |= 0x00000001u; }
  bool has_key() const { return (_has_bits_[0] & 0x00000001u) != 0; }
  void clear_has_key() { _has_bits_[0] &= ~0x00000001u; }
  void set_has_value() { _has_bits_[0] |= 0x00000002u; }
  bool has_value() const { return (_has_bits_[0] & 0x00000002u) != 0; }
  void clear_has_value() { _has_bits_[0] &= ~0x00000002u; }

 private:
  // Serializing a generated message containing map field involves serializing
  // key-value pairs from google::protobuf::Map. The wire format of each key-value pair
  // after serialization should be the same as that of a MapEntry message
  // containing the same key and value inside it. However, google::protobuf::Map doesn't
  // store key and value as MapEntry message, which disables us to use existing
  // code to serialize message. In order to use existing code to serialize
  // message, we need to construct a MapEntry from key-value pair. But it
  // involves copy of key and value to construct a MapEntry. In order to avoid
  // this copy in constructing a MapEntry, we need the following class which
  // only takes references of given key and value.
  class MapEntryWrapper : public Derived {
    typedef Derived BaseClass;
    typedef typename BaseClass::KeyMapEntryAccessorType KeyMapEntryAccessorType;
    typedef
        typename BaseClass::ValueMapEntryAccessorType ValueMapEntryAccessorType;

   public:
    MapEntryWrapper(Arena* arena, const Key& key, const Value& value)
        : Derived(arena), key_(key), value_(value) {
      BaseClass::set_has_key();
      BaseClass::set_has_value();
    }
    // Shadow the base accessors so serialization reads the borrowed
    // references instead of the (empty) inherited storage.
    inline const KeyMapEntryAccessorType& key() const { return key_; }
    inline const ValueMapEntryAccessorType& value() const { return value_; }

   private:
    const Key& key_;
    const Value& value_;

    friend class ::google::protobuf::Arena;
    typedef void InternalArenaConstructable_;
    typedef void DestructorSkippable_;
  };

  // Like above, but for enum value only, which stores value instead of
  // reference of value field inside. This is needed because the type of value
  // field in constructor is an enum, while we need to store it as an int. If we
  // initialize a reference to int with a reference to enum, compiler will
  // generate a temporary int from enum and initialize the reference to int with
  // the temporary.
  class MapEnumEntryWrapper : public Derived {
    typedef Derived BaseClass;
    typedef typename BaseClass::KeyMapEntryAccessorType KeyMapEntryAccessorType;
    typedef
        typename BaseClass::ValueMapEntryAccessorType ValueMapEntryAccessorType;

   public:
    MapEnumEntryWrapper(Arena* arena, const Key& key, const Value& value)
        : Derived(arena), key_(key), value_(value) {
      BaseClass::set_has_key();
      BaseClass::set_has_value();
    }
    inline const KeyMapEntryAccessorType& key() const { return key_; }
    inline const ValueMapEntryAccessorType& value() const { return value_; }

   private:
    const KeyMapEntryAccessorType& key_;
    // Stored by value (not reference): see the class comment above.
    const ValueMapEntryAccessorType value_;

    friend class google::protobuf::Arena;
    typedef void DestructorSkippable_;
  };

  inline Arena* GetArenaNoVirtual() const {
    return arena_;
  }

 public:  // Needed for constructing tables
  KeyOnMemory key_;
  ValueOnMemory value_;
  Arena* arena_;
  uint32 _has_bits_[1];

 private:
  friend class ::google::protobuf::Arena;
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  template <typename C, typename K, typename V, WireFormatLite::FieldType,
            WireFormatLite::FieldType, int>
  friend class internal::MapEntry;
  template <typename C, typename K, typename V, WireFormatLite::FieldType,
            WireFormatLite::FieldType, int>
  friend class internal::MapFieldLite;

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntryImpl);
};
|
533 |
+
|
534 |
+
// Lite-runtime map entry: binds MapEntryImpl to MessageLite as its base
// class. T is the generated entry type (CRTP "Derived" parameter).
template <typename T, typename Key, typename Value,
          WireFormatLite::FieldType kKeyFieldType,
          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
class MapEntryLite
    : public MapEntryImpl<T, MessageLite, Key, Value, kKeyFieldType,
                          kValueFieldType, default_enum_value> {
 public:
  // Exposed so traits code (e.g. MapEntryToMapField) can walk the hierarchy.
  typedef MapEntryImpl<T, MessageLite, Key, Value, kKeyFieldType,
                       kValueFieldType, default_enum_value>
      SuperType;
  MapEntryLite() {}
  explicit MapEntryLite(Arena* arena) : SuperType(arena) {}
  // Forwards to the base's protected MergeFromInternal (see the comment
  // there about why the base cannot expose MergeFrom directly).
  void MergeFrom(const MapEntryLite& other) { MergeFromInternal(other); }

 private:
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntryLite);
};
|
551 |
+
// The completely unprincipled and unwieldy use of template parameters in
// the map code necessitates wrappers to make the code a little bit more
// manageable.
//
// DeconstructMapEntry is a traits struct that re-exposes the template
// arguments of a MapEntryLite instantiation as named typedefs/constants.
// Only the MapEntryLite specialization below is defined; the primary
// template is intentionally left incomplete.
template <typename Derived>
struct DeconstructMapEntry;

template <typename T, typename K, typename V, WireFormatLite::FieldType key,
          WireFormatLite::FieldType value, int default_enum>
struct DeconstructMapEntry<MapEntryLite<T, K, V, key, value, default_enum> > {
  typedef K Key;
  typedef V Value;
  static const WireFormatLite::FieldType kKeyFieldType = key;
  static const WireFormatLite::FieldType kValueFieldType = value;
  static const int default_enum_value = default_enum;
};
|
566 |
+
|
567 |
+
// Helpers for deterministic serialization =============================

// SortItem pairs a copy of a Key with a pointer to the key-value pair it
// came from, and works with any generic sorting algorithm. When Key is
// small and cheap to copy, sorting an array of SortItems keeps all the data
// the sort touches inside one contiguous buffer.
template <typename Key, typename PtrToKeyValuePair> struct SortItem {
  SortItem() {}
  explicit SortItem(PtrToKeyValuePair kv) : first(kv->first), second(kv) {}

  Key first;
  PtrToKeyValuePair second;
};

// Orders elements by their .first member.
template <typename T> struct CompareByFirstField {
  bool operator()(const T& lhs, const T& rhs) const {
    return lhs.first < rhs.first;
  }
};

// Orders pointer-like elements by the .first member of what they point at.
template <typename T> struct CompareByDerefFirst {
  bool operator()(const T& lhs, const T& rhs) const {
    return lhs->first < rhs->first;
  }
};
|
592 |
+
|
593 |
+
// Helper for table driven serialization

// FromHelper adapts a value taken out of a google::protobuf::Map into the
// representation the table-driven serializer expects. The primary template
// passes scalars through by const reference.
template <WireFormatLite::FieldType FieldType>
struct FromHelper {
  template <typename T>
  static const T& From(const T& x) {
    return x;
  }
};

// Strings are wrapped in an ArenaStringPtr that points at the existing
// string; no copy is made. NOTE: the returned ArenaStringPtr aliases x
// (ptr.Set stores x's address), so it must not outlive x and must not be
// used to free it.
template <>
struct FromHelper<WireFormatLite::TYPE_STRING> {
  static ArenaStringPtr From(const string& x) {
    ArenaStringPtr res;
    TaggedPtr<::std::string> ptr;
    ptr.Set(const_cast<string*>(&x));
    res.UnsafeSetTaggedPointer(ptr);
    return res;
  }
};
// Bytes use the same borrowed-pointer wrapping as strings.
template <>
struct FromHelper<WireFormatLite::TYPE_BYTES> {
  static ArenaStringPtr From(const string& x) {
    ArenaStringPtr res;
    TaggedPtr<::std::string> ptr;
    ptr.Set(const_cast<string*>(&x));
    res.UnsafeSetTaggedPointer(ptr);
    return res;
  }
};
// Messages are passed as a non-owning pointer (const is cast away because
// the serializer's table entries store mutable pointers).
template <>
struct FromHelper<WireFormatLite::TYPE_MESSAGE> {
  template <typename T>
  static T* From(const T& x) {
    return const_cast<T*>(&x);
  }
};
|
630 |
+
|
631 |
+
// MapEntryHelper presents a key-value pair from a google::protobuf::Map with
// the same memory layout the proto compiler would generate for the
// corresponding MapEntry message, so table-driven serialization can treat
// it like any other message. Only the MapEntryLite specialization below is
// defined.
template <typename MapEntryType>
struct MapEntryHelper;

template <typename T, typename Key, typename Value,
          WireFormatLite::FieldType kKeyFieldType,
          WireFormatLite::FieldType kValueFieldType, int default_enum_value>
struct MapEntryHelper<MapEntryLite<T, Key, Value, kKeyFieldType,
                                   kValueFieldType, default_enum_value> > {
  // Provide utilities to parse/serialize key/value. Provide utilities to
  // manipulate internal stored type.
  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;
  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;

  // Define internal memory layout. Strings and messages are stored as
  // pointers, while other types are stored as values.
  typedef typename KeyTypeHandler::TypeOnMemory KeyOnMemory;
  typedef typename ValueTypeHandler::TypeOnMemory ValueOnMemory;

  // _has_bits_ is set to 3 (bits 0 and 1): both key and value are treated as
  // present. _cached_size_ adds 2 for the two one-byte field tags.
  explicit MapEntryHelper(const MapPair<Key, Value>& map_pair)
      : _has_bits_(3),
        _cached_size_(2 + KeyTypeHandler::GetCachedSize(map_pair.first) +
                      ValueTypeHandler::GetCachedSize(map_pair.second)),
        key_(FromHelper<kKeyFieldType>::From(map_pair.first)),
        value_(FromHelper<kValueFieldType>::From(map_pair.second)) {}

  // Purposely not following the style guide naming. These are the names
  // the proto compiler would generate given the map entry descriptor.
  // The proto compiler generates the offsets in this struct as if this was
  // a regular message. This way the table driven code barely notices it's
  // dealing with a map field.
  uint32 _has_bits_;     // NOLINT
  uint32 _cached_size_;  // NOLINT
  KeyOnMemory key_;      // NOLINT
  ValueOnMemory value_;  // NOLINT
};
|
666 |
+
|
667 |
+
} // namespace internal
|
668 |
+
} // namespace protobuf
|
669 |
+
|
670 |
+
} // namespace google
|
671 |
+
#endif // GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_field_lite.h
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__
|
32 |
+
#define GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/map.h>
|
35 |
+
#include <google/protobuf/map_entry_lite.h>
|
36 |
+
#include <google/protobuf/wire_format_lite.h>
|
37 |
+
|
38 |
+
namespace google {
|
39 |
+
namespace protobuf {
|
40 |
+
namespace internal {
|
41 |
+
|
42 |
+
// This class provides access to map field using generated api. It is used for
|
43 |
+
// internal generated message implentation only. Users should never use this
|
44 |
+
// directly.
|
45 |
+
template <typename Derived, typename Key, typename T,
|
46 |
+
WireFormatLite::FieldType key_wire_type,
|
47 |
+
WireFormatLite::FieldType value_wire_type, int default_enum_value = 0>
|
48 |
+
class MapFieldLite {
|
49 |
+
// Define message type for internal repeated field.
|
50 |
+
typedef Derived EntryType;
|
51 |
+
|
52 |
+
public:
|
53 |
+
typedef Map<Key, T> MapType;
|
54 |
+
typedef EntryType EntryTypeTrait;
|
55 |
+
|
56 |
+
MapFieldLite() : arena_(NULL) { SetDefaultEnumValue(); }
|
57 |
+
|
58 |
+
explicit MapFieldLite(Arena* arena) : arena_(arena), map_(arena) {
|
59 |
+
SetDefaultEnumValue();
|
60 |
+
}
|
61 |
+
|
62 |
+
// Accessors
|
63 |
+
const Map<Key, T>& GetMap() const { return map_; }
|
64 |
+
Map<Key, T>* MutableMap() { return &map_; }
|
65 |
+
|
66 |
+
// Convenient methods for generated message implementation.
|
67 |
+
int size() const { return static_cast<int>(map_.size()); }
|
68 |
+
void Clear() { return map_.clear(); }
|
69 |
+
void MergeFrom(const MapFieldLite& other) {
|
70 |
+
for (typename Map<Key, T>::const_iterator it = other.map_.begin();
|
71 |
+
it != other.map_.end(); ++it) {
|
72 |
+
map_[it->first] = it->second;
|
73 |
+
}
|
74 |
+
}
|
75 |
+
void Swap(MapFieldLite* other) { map_.swap(other->map_); }
|
76 |
+
|
77 |
+
// Set default enum value only for proto2 map field whose value is enum type.
|
78 |
+
void SetDefaultEnumValue() {
|
79 |
+
MutableMap()->SetDefaultEnumValue(default_enum_value);
|
80 |
+
}
|
81 |
+
|
82 |
+
// Used in the implementation of parsing. Caller should take the ownership iff
|
83 |
+
// arena_ is NULL.
|
84 |
+
EntryType* NewEntry() const {
|
85 |
+
if (arena_ == NULL) {
|
86 |
+
return new EntryType();
|
87 |
+
} else {
|
88 |
+
return Arena::CreateMessage<EntryType>(arena_);
|
89 |
+
}
|
90 |
+
}
|
91 |
+
// Used in the implementation of serializing enum value type. Caller should
|
92 |
+
// take the ownership iff arena_ is NULL.
|
93 |
+
EntryType* NewEnumEntryWrapper(const Key& key, const T t) const {
|
94 |
+
return EntryType::EnumWrap(key, t, arena_);
|
95 |
+
}
|
96 |
+
// Used in the implementation of serializing other value types. Caller should
|
97 |
+
// take the ownership iff arena_ is NULL.
|
98 |
+
EntryType* NewEntryWrapper(const Key& key, const T& t) const {
|
99 |
+
return EntryType::Wrap(key, t, arena_);
|
100 |
+
}
|
101 |
+
|
102 |
+
private:
|
103 |
+
typedef void DestructorSkippable_;
|
104 |
+
|
105 |
+
Arena* arena_;
|
106 |
+
Map<Key, T> map_;
|
107 |
+
|
108 |
+
friend class ::google::protobuf::Arena;
|
109 |
+
};
|
110 |
+
|
111 |
+
// True if IsInitialized() is true for value field in all elements of t. T is
|
112 |
+
// expected to be message. It's useful to have this helper here to keep the
|
113 |
+
// protobuf compiler from ever having to emit loops in IsInitialized() methods.
|
114 |
+
// We want the C++ compiler to inline this or not as it sees fit.
|
115 |
+
template <typename Key, typename T>
|
116 |
+
bool AllAreInitialized(const Map<Key, T>& t) {
|
117 |
+
for (typename Map<Key, T>::const_iterator it = t.begin(); it != t.end();
|
118 |
+
++it) {
|
119 |
+
if (!it->second.IsInitialized()) return false;
|
120 |
+
}
|
121 |
+
return true;
|
122 |
+
}
|
123 |
+
|
124 |
+
template <typename MEntry>
|
125 |
+
struct MapEntryToMapField : MapEntryToMapField<typename MEntry::SuperType> {};
|
126 |
+
|
127 |
+
template <typename T, typename Key, typename Value,
|
128 |
+
WireFormatLite::FieldType kKeyFieldType,
|
129 |
+
WireFormatLite::FieldType kValueFieldType, int default_enum_value>
|
130 |
+
struct MapEntryToMapField<MapEntryLite<T, Key, Value, kKeyFieldType,
|
131 |
+
kValueFieldType, default_enum_value> > {
|
132 |
+
typedef MapFieldLite<MapEntryLite<T, Key, Value, kKeyFieldType,
|
133 |
+
kValueFieldType, default_enum_value>,
|
134 |
+
Key, Value, kKeyFieldType, kValueFieldType,
|
135 |
+
default_enum_value>
|
136 |
+
MapFieldType;
|
137 |
+
};
|
138 |
+
|
139 |
+
} // namespace internal
|
140 |
+
} // namespace protobuf
|
141 |
+
|
142 |
+
} // namespace google
|
143 |
+
#endif // GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/map_type_handler.h
ADDED
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_TYPE_HANDLER_H__
|
32 |
+
#define GOOGLE_PROTOBUF_TYPE_HANDLER_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/arena.h>
|
35 |
+
#include <google/protobuf/wire_format_lite_inl.h>
|
36 |
+
|
37 |
+
namespace google {
|
38 |
+
namespace protobuf {
|
39 |
+
namespace internal {
|
40 |
+
|
41 |
+
// Used for compile time type selection. MapIf::type will be TrueType if Flag is
|
42 |
+
// true and FalseType otherwise.
|
43 |
+
template<bool Flag, typename TrueType, typename FalseType>
|
44 |
+
struct MapIf;
|
45 |
+
|
46 |
+
template<typename TrueType, typename FalseType>
|
47 |
+
struct MapIf<true, TrueType, FalseType> {
|
48 |
+
typedef TrueType type;
|
49 |
+
};
|
50 |
+
|
51 |
+
template<typename TrueType, typename FalseType>
|
52 |
+
struct MapIf<false, TrueType, FalseType> {
|
53 |
+
typedef FalseType type;
|
54 |
+
};
|
55 |
+
|
56 |
+
// In proto2 Map, enum needs to be initialized to given default value, while
|
57 |
+
// other types' default value can be inferred from the type.
|
58 |
+
template <bool IsEnum, typename Type>
|
59 |
+
class MapValueInitializer {
|
60 |
+
public:
|
61 |
+
static inline void Initialize(Type& type, int default_enum_value);
|
62 |
+
};
|
63 |
+
|
64 |
+
template <typename Type>
|
65 |
+
class MapValueInitializer<true, Type> {
|
66 |
+
public:
|
67 |
+
static inline void Initialize(Type& value, int default_enum_value) {
|
68 |
+
value = static_cast<Type>(default_enum_value);
|
69 |
+
}
|
70 |
+
};
|
71 |
+
|
72 |
+
template <typename Type>
|
73 |
+
class MapValueInitializer<false, Type> {
|
74 |
+
public:
|
75 |
+
static inline void Initialize(Type& /* value */, int /* default_enum_value */) {}
|
76 |
+
};
|
77 |
+
|
78 |
+
template <typename Type, bool is_arena_constructable>
|
79 |
+
class MapArenaMessageCreator {
|
80 |
+
public:
|
81 |
+
// Use arena to create message if Type is arena constructable. Otherwise,
|
82 |
+
// create the message on heap.
|
83 |
+
static inline Type* CreateMessage(Arena* arena);
|
84 |
+
};
|
85 |
+
template <typename Type>
|
86 |
+
class MapArenaMessageCreator<Type, true> {
|
87 |
+
public:
|
88 |
+
static inline Type* CreateMessage(Arena* arena) {
|
89 |
+
return Arena::CreateMessage<Type>(arena);
|
90 |
+
}
|
91 |
+
};
|
92 |
+
template <typename Type>
|
93 |
+
class MapArenaMessageCreator<Type, false> {
|
94 |
+
public:
|
95 |
+
static inline Type* CreateMessage(Arena* arena) {
|
96 |
+
return Arena::Create<Type>(arena);
|
97 |
+
}
|
98 |
+
};
|
99 |
+
|
100 |
+
// Define constants for given wire field type
|
101 |
+
template <WireFormatLite::FieldType field_type, typename Type>
|
102 |
+
class MapWireFieldTypeTraits {};
|
103 |
+
|
104 |
+
#define TYPE_TRAITS(FieldType, CType, WireFormatType, IsMessage, IsEnum) \
|
105 |
+
template <typename Type> \
|
106 |
+
class MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, Type> { \
|
107 |
+
public: \
|
108 |
+
static const bool kIsMessage = IsMessage; \
|
109 |
+
static const bool kIsEnum = IsEnum; \
|
110 |
+
typedef typename MapIf<kIsMessage, Type*, CType>::type TypeOnMemory; \
|
111 |
+
typedef typename MapIf<kIsEnum, int, Type>::type MapEntryAccessorType; \
|
112 |
+
static const WireFormatLite::WireType kWireType = \
|
113 |
+
WireFormatLite::WIRETYPE_##WireFormatType; \
|
114 |
+
};
|
115 |
+
|
116 |
+
TYPE_TRAITS(MESSAGE , Type, LENGTH_DELIMITED, true, false)
|
117 |
+
TYPE_TRAITS(STRING , ArenaStringPtr, LENGTH_DELIMITED, false, false)
|
118 |
+
TYPE_TRAITS(BYTES , ArenaStringPtr , LENGTH_DELIMITED, false, false)
|
119 |
+
TYPE_TRAITS(INT64 , int64 , VARINT , false, false)
|
120 |
+
TYPE_TRAITS(UINT64 , uint64 , VARINT , false, false)
|
121 |
+
TYPE_TRAITS(INT32 , int32 , VARINT , false, false)
|
122 |
+
TYPE_TRAITS(UINT32 , uint32 , VARINT , false, false)
|
123 |
+
TYPE_TRAITS(SINT64 , int64 , VARINT , false, false)
|
124 |
+
TYPE_TRAITS(SINT32 , int32 , VARINT , false, false)
|
125 |
+
TYPE_TRAITS(ENUM , int , VARINT , false, true )
|
126 |
+
TYPE_TRAITS(DOUBLE , double , FIXED64, false, false)
|
127 |
+
TYPE_TRAITS(FLOAT , float , FIXED32, false, false)
|
128 |
+
TYPE_TRAITS(FIXED64 , uint64 , FIXED64, false, false)
|
129 |
+
TYPE_TRAITS(FIXED32 , uint32 , FIXED32, false, false)
|
130 |
+
TYPE_TRAITS(SFIXED64, int64 , FIXED64, false, false)
|
131 |
+
TYPE_TRAITS(SFIXED32, int32 , FIXED32, false, false)
|
132 |
+
TYPE_TRAITS(BOOL , bool , VARINT , false, false)
|
133 |
+
|
134 |
+
#undef TYPE_TRAITS
|
135 |
+
|
136 |
+
template <WireFormatLite::FieldType field_type, typename Type>
|
137 |
+
class MapTypeHandler {};
|
138 |
+
|
139 |
+
template <typename Type>
|
140 |
+
class MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type> {
|
141 |
+
public:
|
142 |
+
// Enum type cannot be used for MapTypeHandler::Read. Define a type which will
|
143 |
+
// replace Enum with int.
|
144 |
+
typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE,
|
145 |
+
Type>::MapEntryAccessorType MapEntryAccessorType;
|
146 |
+
// Internal stored type in MapEntryLite for given wire field type.
|
147 |
+
typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE,
|
148 |
+
Type>::TypeOnMemory TypeOnMemory;
|
149 |
+
// Corresponding wire type for field type.
|
150 |
+
static const WireFormatLite::WireType kWireType =
|
151 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kWireType;
|
152 |
+
// Whether wire type is for message.
|
153 |
+
static const bool kIsMessage =
|
154 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kIsMessage;
|
155 |
+
// Whether wire type is for enum.
|
156 |
+
static const bool kIsEnum =
|
157 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kIsEnum;
|
158 |
+
|
159 |
+
// Functions used in parsing and serialization. ===================
|
160 |
+
static inline size_t ByteSize(const MapEntryAccessorType& value);
|
161 |
+
static inline int GetCachedSize(const MapEntryAccessorType& value);
|
162 |
+
static inline bool Read(io::CodedInputStream* input,
|
163 |
+
MapEntryAccessorType* value);
|
164 |
+
static inline void Write(int field, const MapEntryAccessorType& value,
|
165 |
+
io::CodedOutputStream* output);
|
166 |
+
static inline uint8* InternalWriteToArray(int field,
|
167 |
+
const MapEntryAccessorType& value,
|
168 |
+
bool deterministic, uint8* target);
|
169 |
+
static inline uint8* WriteToArray(int field,
|
170 |
+
const MapEntryAccessorType& value,
|
171 |
+
uint8* target);
|
172 |
+
|
173 |
+
// Functions to manipulate data on memory. ========================
|
174 |
+
static inline const Type& GetExternalReference(const Type* value);
|
175 |
+
static inline void DeleteNoArena(const Type* x);
|
176 |
+
static inline void Merge(const Type& from, Type** to, Arena* arena);
|
177 |
+
static inline void Clear(Type** value, Arena* arena);
|
178 |
+
static inline void ClearMaybeByDefaultEnum(Type** value, Arena* arena,
|
179 |
+
int default_enum_value);
|
180 |
+
static inline void Initialize(Type** x, Arena* arena);
|
181 |
+
|
182 |
+
static inline void InitializeMaybeByDefaultEnum(Type** x,
|
183 |
+
int default_enum_value,
|
184 |
+
Arena* arena);
|
185 |
+
static inline Type* EnsureMutable(Type** value, Arena* arena);
|
186 |
+
// SpaceUsedInMapEntry: Return bytes used by value in MapEntry, excluding
|
187 |
+
// those already calculate in sizeof(MapField).
|
188 |
+
static inline size_t SpaceUsedInMapEntryLong(const Type* value);
|
189 |
+
// Return bytes used by value in Map.
|
190 |
+
static inline size_t SpaceUsedInMapLong(const Type& value);
|
191 |
+
// Assign default value to given instance.
|
192 |
+
static inline void AssignDefaultValue(Type** value);
|
193 |
+
// Return default instance if value is not initialized when calling const
|
194 |
+
// reference accessor.
|
195 |
+
static inline const Type& DefaultIfNotInitialized(
|
196 |
+
const Type* value, const Type* default_value);
|
197 |
+
// Check if all required fields have values set.
|
198 |
+
static inline bool IsInitialized(Type* value);
|
199 |
+
};
|
200 |
+
|
201 |
+
#define MAP_HANDLER(FieldType) \
|
202 |
+
template <typename Type> \
|
203 |
+
class MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type> { \
|
204 |
+
public: \
|
205 |
+
typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \
|
206 |
+
Type>::MapEntryAccessorType \
|
207 |
+
MapEntryAccessorType; \
|
208 |
+
typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \
|
209 |
+
Type>::TypeOnMemory TypeOnMemory; \
|
210 |
+
static const WireFormatLite::WireType kWireType = \
|
211 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \
|
212 |
+
Type>::kWireType; \
|
213 |
+
static const bool kIsMessage = \
|
214 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \
|
215 |
+
Type>::kIsMessage; \
|
216 |
+
static const bool kIsEnum = \
|
217 |
+
MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \
|
218 |
+
Type>::kIsEnum; \
|
219 |
+
static inline int ByteSize(const MapEntryAccessorType& value); \
|
220 |
+
static inline int GetCachedSize(const MapEntryAccessorType& value); \
|
221 |
+
static inline bool Read(io::CodedInputStream* input, \
|
222 |
+
MapEntryAccessorType* value); \
|
223 |
+
static inline void Write(int field, const MapEntryAccessorType& value, \
|
224 |
+
io::CodedOutputStream* output); \
|
225 |
+
static inline uint8* InternalWriteToArray( \
|
226 |
+
int field, const MapEntryAccessorType& value, bool deterministic, \
|
227 |
+
uint8* target); \
|
228 |
+
static inline uint8* WriteToArray(int field, \
|
229 |
+
const MapEntryAccessorType& value, \
|
230 |
+
uint8* target) { \
|
231 |
+
return InternalWriteToArray(field, value, false, target); \
|
232 |
+
} \
|
233 |
+
static inline const MapEntryAccessorType& GetExternalReference( \
|
234 |
+
const TypeOnMemory& value); \
|
235 |
+
static inline void DeleteNoArena(const TypeOnMemory& x); \
|
236 |
+
static inline void Merge(const MapEntryAccessorType& from, \
|
237 |
+
TypeOnMemory* to, Arena* arena); \
|
238 |
+
static inline void Clear(TypeOnMemory* value, Arena* arena); \
|
239 |
+
static inline void ClearMaybeByDefaultEnum(TypeOnMemory* value, \
|
240 |
+
Arena* arena, \
|
241 |
+
int default_enum); \
|
242 |
+
static inline size_t SpaceUsedInMapEntryLong(const TypeOnMemory& value); \
|
243 |
+
static inline size_t SpaceUsedInMapLong(const TypeOnMemory& value); \
|
244 |
+
static inline size_t SpaceUsedInMapLong(const string& value); \
|
245 |
+
static inline void AssignDefaultValue(TypeOnMemory* value); \
|
246 |
+
static inline const MapEntryAccessorType& DefaultIfNotInitialized( \
|
247 |
+
const TypeOnMemory& value, const TypeOnMemory& default_value); \
|
248 |
+
static inline bool IsInitialized(const TypeOnMemory& value); \
|
249 |
+
static void DeleteNoArena(TypeOnMemory& value); \
|
250 |
+
static inline void Initialize(TypeOnMemory* value, Arena* arena); \
|
251 |
+
static inline void InitializeMaybeByDefaultEnum(TypeOnMemory* value, \
|
252 |
+
int default_enum_value, \
|
253 |
+
Arena* arena); \
|
254 |
+
static inline MapEntryAccessorType* EnsureMutable(TypeOnMemory* value, \
|
255 |
+
Arena* arena); \
|
256 |
+
};
|
257 |
+
MAP_HANDLER(STRING)
|
258 |
+
MAP_HANDLER(BYTES)
|
259 |
+
MAP_HANDLER(INT64)
|
260 |
+
MAP_HANDLER(UINT64)
|
261 |
+
MAP_HANDLER(INT32)
|
262 |
+
MAP_HANDLER(UINT32)
|
263 |
+
MAP_HANDLER(SINT64)
|
264 |
+
MAP_HANDLER(SINT32)
|
265 |
+
MAP_HANDLER(ENUM)
|
266 |
+
MAP_HANDLER(DOUBLE)
|
267 |
+
MAP_HANDLER(FLOAT)
|
268 |
+
MAP_HANDLER(FIXED64)
|
269 |
+
MAP_HANDLER(FIXED32)
|
270 |
+
MAP_HANDLER(SFIXED64)
|
271 |
+
MAP_HANDLER(SFIXED32)
|
272 |
+
MAP_HANDLER(BOOL)
|
273 |
+
#undef MAP_HANDLER
|
274 |
+
|
275 |
+
template <typename Type>
|
276 |
+
inline size_t
|
277 |
+
MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::ByteSize(
|
278 |
+
const MapEntryAccessorType& value) {
|
279 |
+
return WireFormatLite::MessageSizeNoVirtual(value);
|
280 |
+
}
|
281 |
+
|
282 |
+
#define GOOGLE_PROTOBUF_BYTE_SIZE(FieldType, DeclaredType) \
|
283 |
+
template <typename Type> \
|
284 |
+
inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::ByteSize( \
|
285 |
+
const MapEntryAccessorType& value) { \
|
286 |
+
return static_cast<int>(WireFormatLite::DeclaredType##Size(value)); \
|
287 |
+
}
|
288 |
+
|
289 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(STRING, String)
|
290 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(BYTES , Bytes)
|
291 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(INT64 , Int64)
|
292 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(UINT64, UInt64)
|
293 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(INT32 , Int32)
|
294 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(UINT32, UInt32)
|
295 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(SINT64, SInt64)
|
296 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(SINT32, SInt32)
|
297 |
+
GOOGLE_PROTOBUF_BYTE_SIZE(ENUM , Enum)
|
298 |
+
|
299 |
+
#undef GOOGLE_PROTOBUF_BYTE_SIZE
|
300 |
+
|
301 |
+
#define FIXED_BYTE_SIZE(FieldType, DeclaredType) \
|
302 |
+
template <typename Type> \
|
303 |
+
inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::ByteSize( \
|
304 |
+
const MapEntryAccessorType& /* value */) { \
|
305 |
+
return WireFormatLite::k##DeclaredType##Size; \
|
306 |
+
}
|
307 |
+
|
308 |
+
FIXED_BYTE_SIZE(DOUBLE , Double)
|
309 |
+
FIXED_BYTE_SIZE(FLOAT , Float)
|
310 |
+
FIXED_BYTE_SIZE(FIXED64 , Fixed64)
|
311 |
+
FIXED_BYTE_SIZE(FIXED32 , Fixed32)
|
312 |
+
FIXED_BYTE_SIZE(SFIXED64, SFixed64)
|
313 |
+
FIXED_BYTE_SIZE(SFIXED32, SFixed32)
|
314 |
+
FIXED_BYTE_SIZE(BOOL , Bool)
|
315 |
+
|
316 |
+
#undef FIXED_BYTE_SIZE
|
317 |
+
|
318 |
+
template <typename Type>
|
319 |
+
inline int
|
320 |
+
MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::GetCachedSize(
|
321 |
+
const MapEntryAccessorType& value) {
|
322 |
+
return static_cast<int>(
|
323 |
+
WireFormatLite::LengthDelimitedSize(
|
324 |
+
static_cast<size_t>(value.GetCachedSize())));
|
325 |
+
}
|
326 |
+
|
327 |
+
#define GET_CACHED_SIZE(FieldType, DeclaredType) \
|
328 |
+
template <typename Type> \
|
329 |
+
inline int \
|
330 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::GetCachedSize( \
|
331 |
+
const MapEntryAccessorType& value) { \
|
332 |
+
return static_cast<int>(WireFormatLite::DeclaredType##Size(value)); \
|
333 |
+
}
|
334 |
+
|
335 |
+
GET_CACHED_SIZE(STRING, String)
|
336 |
+
GET_CACHED_SIZE(BYTES , Bytes)
|
337 |
+
GET_CACHED_SIZE(INT64 , Int64)
|
338 |
+
GET_CACHED_SIZE(UINT64, UInt64)
|
339 |
+
GET_CACHED_SIZE(INT32 , Int32)
|
340 |
+
GET_CACHED_SIZE(UINT32, UInt32)
|
341 |
+
GET_CACHED_SIZE(SINT64, SInt64)
|
342 |
+
GET_CACHED_SIZE(SINT32, SInt32)
|
343 |
+
GET_CACHED_SIZE(ENUM , Enum)
|
344 |
+
|
345 |
+
#undef GET_CACHED_SIZE
|
346 |
+
|
347 |
+
#define GET_FIXED_CACHED_SIZE(FieldType, DeclaredType) \
|
348 |
+
template <typename Type> \
|
349 |
+
inline int \
|
350 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::GetCachedSize( \
|
351 |
+
const MapEntryAccessorType& /* value */) { \
|
352 |
+
return WireFormatLite::k##DeclaredType##Size; \
|
353 |
+
}
|
354 |
+
|
355 |
+
GET_FIXED_CACHED_SIZE(DOUBLE , Double)
|
356 |
+
GET_FIXED_CACHED_SIZE(FLOAT , Float)
|
357 |
+
GET_FIXED_CACHED_SIZE(FIXED64 , Fixed64)
|
358 |
+
GET_FIXED_CACHED_SIZE(FIXED32 , Fixed32)
|
359 |
+
GET_FIXED_CACHED_SIZE(SFIXED64, SFixed64)
|
360 |
+
GET_FIXED_CACHED_SIZE(SFIXED32, SFixed32)
|
361 |
+
GET_FIXED_CACHED_SIZE(BOOL , Bool)
|
362 |
+
|
363 |
+
#undef GET_FIXED_CACHED_SIZE
|
364 |
+
|
365 |
+
template <typename Type>
|
366 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Write(
|
367 |
+
int field, const MapEntryAccessorType& value,
|
368 |
+
io::CodedOutputStream* output) {
|
369 |
+
WireFormatLite::WriteMessageMaybeToArray(field, value, output);
|
370 |
+
}
|
371 |
+
|
372 |
+
template <typename Type>
|
373 |
+
inline uint8*
|
374 |
+
MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::InternalWriteToArray(
|
375 |
+
int field, const MapEntryAccessorType& value, bool deterministic,
|
376 |
+
uint8* target) {
|
377 |
+
return WireFormatLite::InternalWriteMessageToArray(field, value,
|
378 |
+
deterministic, target);
|
379 |
+
}
|
380 |
+
|
381 |
+
#define WRITE_METHOD(FieldType, DeclaredType) \
|
382 |
+
template <typename Type> \
|
383 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Write( \
|
384 |
+
int field, const MapEntryAccessorType& value, \
|
385 |
+
io::CodedOutputStream* output) { \
|
386 |
+
return WireFormatLite::Write##DeclaredType(field, value, output); \
|
387 |
+
} \
|
388 |
+
template <typename Type> \
|
389 |
+
inline uint8* \
|
390 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
391 |
+
Type>::InternalWriteToArray( \
|
392 |
+
int field, const MapEntryAccessorType& value, bool, uint8* target) { \
|
393 |
+
return WireFormatLite::Write##DeclaredType##ToArray(field, value, target); \
|
394 |
+
}
|
395 |
+
|
396 |
+
WRITE_METHOD(STRING , String)
|
397 |
+
WRITE_METHOD(BYTES , Bytes)
|
398 |
+
WRITE_METHOD(INT64 , Int64)
|
399 |
+
WRITE_METHOD(UINT64 , UInt64)
|
400 |
+
WRITE_METHOD(INT32 , Int32)
|
401 |
+
WRITE_METHOD(UINT32 , UInt32)
|
402 |
+
WRITE_METHOD(SINT64 , SInt64)
|
403 |
+
WRITE_METHOD(SINT32 , SInt32)
|
404 |
+
WRITE_METHOD(ENUM , Enum)
|
405 |
+
WRITE_METHOD(DOUBLE , Double)
|
406 |
+
WRITE_METHOD(FLOAT , Float)
|
407 |
+
WRITE_METHOD(FIXED64 , Fixed64)
|
408 |
+
WRITE_METHOD(FIXED32 , Fixed32)
|
409 |
+
WRITE_METHOD(SFIXED64, SFixed64)
|
410 |
+
WRITE_METHOD(SFIXED32, SFixed32)
|
411 |
+
WRITE_METHOD(BOOL , Bool)
|
412 |
+
|
413 |
+
#undef WRITE_METHOD
|
414 |
+
|
415 |
+
template <typename Type>
|
416 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Read(
|
417 |
+
io::CodedInputStream* input, MapEntryAccessorType* value) {
|
418 |
+
return WireFormatLite::ReadMessageNoVirtual(input, value);
|
419 |
+
}
|
420 |
+
|
421 |
+
template <typename Type>
|
422 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_STRING, Type>::Read(
|
423 |
+
io::CodedInputStream* input, MapEntryAccessorType* value) {
|
424 |
+
return WireFormatLite::ReadString(input, value);
|
425 |
+
}
|
426 |
+
|
427 |
+
template <typename Type>
|
428 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_BYTES, Type>::Read(
|
429 |
+
io::CodedInputStream* input, MapEntryAccessorType* value) {
|
430 |
+
return WireFormatLite::ReadBytes(input, value);
|
431 |
+
}
|
432 |
+
|
433 |
+
#define READ_METHOD(FieldType) \
|
434 |
+
template <typename Type> \
|
435 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Read( \
|
436 |
+
io::CodedInputStream* input, MapEntryAccessorType* value) { \
|
437 |
+
return WireFormatLite::ReadPrimitive<TypeOnMemory, \
|
438 |
+
WireFormatLite::TYPE_##FieldType>( \
|
439 |
+
input, value); \
|
440 |
+
}
|
441 |
+
|
442 |
+
READ_METHOD(INT64)
|
443 |
+
READ_METHOD(UINT64)
|
444 |
+
READ_METHOD(INT32)
|
445 |
+
READ_METHOD(UINT32)
|
446 |
+
READ_METHOD(SINT64)
|
447 |
+
READ_METHOD(SINT32)
|
448 |
+
READ_METHOD(ENUM)
|
449 |
+
READ_METHOD(DOUBLE)
|
450 |
+
READ_METHOD(FLOAT)
|
451 |
+
READ_METHOD(FIXED64)
|
452 |
+
READ_METHOD(FIXED32)
|
453 |
+
READ_METHOD(SFIXED64)
|
454 |
+
READ_METHOD(SFIXED32)
|
455 |
+
READ_METHOD(BOOL)
|
456 |
+
|
457 |
+
#undef READ_METHOD
|
458 |
+
|
459 |
+
// Definition for message handler
|
460 |
+
|
461 |
+
template <typename Type>
|
462 |
+
inline const Type&
|
463 |
+
MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
464 |
+
Type>::GetExternalReference(const Type* value) {
|
465 |
+
return *value;
|
466 |
+
}
|
467 |
+
|
468 |
+
template <typename Type>
|
469 |
+
inline size_t MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
470 |
+
Type>::SpaceUsedInMapEntryLong(const Type* value) {
|
471 |
+
return value->SpaceUsedLong();
|
472 |
+
}
|
473 |
+
|
474 |
+
template <typename Type>
|
475 |
+
size_t MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::SpaceUsedInMapLong(
|
476 |
+
const Type& value) {
|
477 |
+
return value.SpaceUsedLong();
|
478 |
+
}
|
479 |
+
|
480 |
+
template <typename Type>
|
481 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Clear(
|
482 |
+
Type** value, Arena* /* arena */) {
|
483 |
+
if (*value != NULL) (*value)->Clear();
|
484 |
+
}
|
485 |
+
template <typename Type>
|
486 |
+
inline void
|
487 |
+
MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
488 |
+
Type>::ClearMaybeByDefaultEnum(Type** value,
|
489 |
+
Arena* /* arena */,
|
490 |
+
int /* default_enum_value */) {
|
491 |
+
if (*value != NULL) (*value)->Clear();
|
492 |
+
}
|
493 |
+
template <typename Type>
|
494 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Merge(
|
495 |
+
const Type& from, Type** to, Arena* /* arena */) {
|
496 |
+
(*to)->MergeFrom(from);
|
497 |
+
}
|
498 |
+
|
499 |
+
template <typename Type>
|
500 |
+
void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::DeleteNoArena(
|
501 |
+
const Type* ptr) {
|
502 |
+
delete ptr;
|
503 |
+
}
|
504 |
+
|
505 |
+
template <typename Type>
|
506 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
507 |
+
Type>::AssignDefaultValue(Type** value) {
|
508 |
+
*value = const_cast<Type*>(Type::internal_default_instance());
|
509 |
+
}
|
510 |
+
|
511 |
+
template <typename Type>
|
512 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
513 |
+
Type>::Initialize(Type** x,
|
514 |
+
Arena* /* arena */) {
|
515 |
+
*x = NULL;
|
516 |
+
}
|
517 |
+
|
518 |
+
template <typename Type>
|
519 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::
|
520 |
+
InitializeMaybeByDefaultEnum(Type** x, int /* default_enum_value */,
|
521 |
+
Arena* /* arena */) {
|
522 |
+
*x = NULL;
|
523 |
+
}
|
524 |
+
|
525 |
+
template <typename Type>
|
526 |
+
inline Type* MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
527 |
+
Type>::EnsureMutable(Type** value,
|
528 |
+
Arena* arena) {
|
529 |
+
if (*value == NULL) {
|
530 |
+
*value =
|
531 |
+
MapArenaMessageCreator<Type, Arena::is_arena_constructable<Type>::
|
532 |
+
type::value>::CreateMessage(arena);
|
533 |
+
}
|
534 |
+
return *value;
|
535 |
+
}
|
536 |
+
|
537 |
+
template <typename Type>
|
538 |
+
inline const Type& MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::
|
539 |
+
DefaultIfNotInitialized(const Type* value, const Type* default_value) {
|
540 |
+
return value != NULL ? *value : *default_value;
|
541 |
+
}
|
542 |
+
|
543 |
+
template <typename Type>
|
544 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_MESSAGE,
|
545 |
+
Type>::IsInitialized(Type* value) {
|
546 |
+
return value->IsInitialized();
|
547 |
+
}
|
548 |
+
|
549 |
+
// Definition for string/bytes handler
|
550 |
+
|
551 |
+
#define STRING_OR_BYTES_HANDLER_FUNCTIONS(FieldType) \
|
552 |
+
template <typename Type> \
|
553 |
+
inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
554 |
+
Type>::MapEntryAccessorType& \
|
555 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
556 |
+
Type>::GetExternalReference(const TypeOnMemory& value) { \
|
557 |
+
return value.Get(); \
|
558 |
+
} \
|
559 |
+
template <typename Type> \
|
560 |
+
inline size_t \
|
561 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
562 |
+
Type>::SpaceUsedInMapEntryLong(const TypeOnMemory& value) { \
|
563 |
+
return sizeof(value); \
|
564 |
+
} \
|
565 |
+
template <typename Type> \
|
566 |
+
inline size_t \
|
567 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::SpaceUsedInMapLong( \
|
568 |
+
const TypeOnMemory& value) { \
|
569 |
+
return sizeof(value); \
|
570 |
+
} \
|
571 |
+
template <typename Type> \
|
572 |
+
inline size_t \
|
573 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::SpaceUsedInMapLong( \
|
574 |
+
const string& value) { \
|
575 |
+
return sizeof(value); \
|
576 |
+
} \
|
577 |
+
template <typename Type> \
|
578 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Clear( \
|
579 |
+
TypeOnMemory* value, Arena* arena) { \
|
580 |
+
value->ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), \
|
581 |
+
arena); \
|
582 |
+
} \
|
583 |
+
template <typename Type> \
|
584 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>:: \
|
585 |
+
ClearMaybeByDefaultEnum(TypeOnMemory* value, Arena* arena, \
|
586 |
+
int /* default_enum */) { \
|
587 |
+
Clear(value, arena); \
|
588 |
+
} \
|
589 |
+
template <typename Type> \
|
590 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Merge( \
|
591 |
+
const MapEntryAccessorType& from, TypeOnMemory* to, Arena* arena) { \
|
592 |
+
to->Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from, arena); \
|
593 |
+
} \
|
594 |
+
template <typename Type> \
|
595 |
+
void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::DeleteNoArena( \
|
596 |
+
TypeOnMemory& value) { \
|
597 |
+
value.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); \
|
598 |
+
} \
|
599 |
+
template <typename Type> \
|
600 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
601 |
+
Type>::AssignDefaultValue(TypeOnMemory* /* value */) {} \
|
602 |
+
template <typename Type> \
|
603 |
+
inline void \
|
604 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Initialize( \
|
605 |
+
TypeOnMemory* value, Arena* /* arena */) { \
|
606 |
+
value->UnsafeSetDefault( \
|
607 |
+
&::google::protobuf::internal::GetEmptyStringAlreadyInited()); \
|
608 |
+
} \
|
609 |
+
template <typename Type> \
|
610 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>:: \
|
611 |
+
InitializeMaybeByDefaultEnum(TypeOnMemory* value, \
|
612 |
+
int /* default_enum_value */, \
|
613 |
+
Arena* arena) { \
|
614 |
+
Initialize(value, arena); \
|
615 |
+
} \
|
616 |
+
template <typename Type> \
|
617 |
+
inline typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
618 |
+
Type>::MapEntryAccessorType* \
|
619 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::EnsureMutable( \
|
620 |
+
TypeOnMemory* value, Arena* arena) { \
|
621 |
+
return value->Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), \
|
622 |
+
arena); \
|
623 |
+
} \
|
624 |
+
template <typename Type> \
|
625 |
+
inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
626 |
+
Type>::MapEntryAccessorType& \
|
627 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
628 |
+
Type>::DefaultIfNotInitialized(const TypeOnMemory& value, \
|
629 |
+
const TypeOnMemory& \
|
630 |
+
/* default_value */) { \
|
631 |
+
return value.Get(); \
|
632 |
+
} \
|
633 |
+
template <typename Type> \
|
634 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
635 |
+
Type>::IsInitialized(const TypeOnMemory& /* value */) { \
|
636 |
+
return true; \
|
637 |
+
}
|
638 |
+
STRING_OR_BYTES_HANDLER_FUNCTIONS(STRING)
|
639 |
+
STRING_OR_BYTES_HANDLER_FUNCTIONS(BYTES)
|
640 |
+
#undef STRING_OR_BYTES_HANDLER_FUNCTIONS
|
641 |
+
|
642 |
+
#define PRIMITIVE_HANDLER_FUNCTIONS(FieldType) \
|
643 |
+
template <typename Type> \
|
644 |
+
inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
645 |
+
Type>::MapEntryAccessorType& \
|
646 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
647 |
+
Type>::GetExternalReference(const TypeOnMemory& value) { \
|
648 |
+
return value; \
|
649 |
+
} \
|
650 |
+
template <typename Type> \
|
651 |
+
inline size_t \
|
652 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
653 |
+
Type>::SpaceUsedInMapEntryLong(const TypeOnMemory& /* value */) { \
|
654 |
+
return 0; \
|
655 |
+
} \
|
656 |
+
template <typename Type> \
|
657 |
+
inline size_t \
|
658 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::SpaceUsedInMapLong( \
|
659 |
+
const TypeOnMemory& /* value */) { \
|
660 |
+
return sizeof(Type); \
|
661 |
+
} \
|
662 |
+
template <typename Type> \
|
663 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Clear( \
|
664 |
+
TypeOnMemory* value, Arena* /* arena */) { \
|
665 |
+
*value = 0; \
|
666 |
+
} \
|
667 |
+
template <typename Type> \
|
668 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>:: \
|
669 |
+
ClearMaybeByDefaultEnum(TypeOnMemory* value, Arena* /* arena */, \
|
670 |
+
int default_enum_value) { \
|
671 |
+
*value = static_cast<TypeOnMemory>(default_enum_value); \
|
672 |
+
} \
|
673 |
+
template <typename Type> \
|
674 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Merge( \
|
675 |
+
const MapEntryAccessorType& from, TypeOnMemory* to, \
|
676 |
+
Arena* /* arena */) { \
|
677 |
+
*to = from; \
|
678 |
+
} \
|
679 |
+
template <typename Type> \
|
680 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
681 |
+
Type>::DeleteNoArena(TypeOnMemory& /* x */) {} \
|
682 |
+
template <typename Type> \
|
683 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
684 |
+
Type>::AssignDefaultValue(TypeOnMemory* /* value */) {} \
|
685 |
+
template <typename Type> \
|
686 |
+
inline void \
|
687 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Initialize( \
|
688 |
+
TypeOnMemory* value, Arena* /* arena */) { \
|
689 |
+
*value = 0; \
|
690 |
+
} \
|
691 |
+
template <typename Type> \
|
692 |
+
inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>:: \
|
693 |
+
InitializeMaybeByDefaultEnum(TypeOnMemory* value, \
|
694 |
+
int default_enum_value, \
|
695 |
+
Arena* /* arena */) { \
|
696 |
+
*value = static_cast<TypeOnMemory>(default_enum_value); \
|
697 |
+
} \
|
698 |
+
template <typename Type> \
|
699 |
+
inline typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
700 |
+
Type>::MapEntryAccessorType* \
|
701 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::EnsureMutable( \
|
702 |
+
TypeOnMemory* value, Arena* /* arena */) { \
|
703 |
+
return value; \
|
704 |
+
} \
|
705 |
+
template <typename Type> \
|
706 |
+
inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
707 |
+
Type>::MapEntryAccessorType& \
|
708 |
+
MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
709 |
+
Type>::DefaultIfNotInitialized(const TypeOnMemory& value, \
|
710 |
+
const TypeOnMemory& \
|
711 |
+
/* default_value */) { \
|
712 |
+
return value; \
|
713 |
+
} \
|
714 |
+
template <typename Type> \
|
715 |
+
inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType, \
|
716 |
+
Type>::IsInitialized(const TypeOnMemory& /* value */) { \
|
717 |
+
return true; \
|
718 |
+
}
|
719 |
+
PRIMITIVE_HANDLER_FUNCTIONS(INT64)
|
720 |
+
PRIMITIVE_HANDLER_FUNCTIONS(UINT64)
|
721 |
+
PRIMITIVE_HANDLER_FUNCTIONS(INT32)
|
722 |
+
PRIMITIVE_HANDLER_FUNCTIONS(UINT32)
|
723 |
+
PRIMITIVE_HANDLER_FUNCTIONS(SINT64)
|
724 |
+
PRIMITIVE_HANDLER_FUNCTIONS(SINT32)
|
725 |
+
PRIMITIVE_HANDLER_FUNCTIONS(ENUM)
|
726 |
+
PRIMITIVE_HANDLER_FUNCTIONS(DOUBLE)
|
727 |
+
PRIMITIVE_HANDLER_FUNCTIONS(FLOAT)
|
728 |
+
PRIMITIVE_HANDLER_FUNCTIONS(FIXED64)
|
729 |
+
PRIMITIVE_HANDLER_FUNCTIONS(FIXED32)
|
730 |
+
PRIMITIVE_HANDLER_FUNCTIONS(SFIXED64)
|
731 |
+
PRIMITIVE_HANDLER_FUNCTIONS(SFIXED32)
|
732 |
+
PRIMITIVE_HANDLER_FUNCTIONS(BOOL)
|
733 |
+
#undef PRIMITIVE_HANDLER_FUNCTIONS
|
734 |
+
|
735 |
+
} // namespace internal
|
736 |
+
} // namespace protobuf
|
737 |
+
|
738 |
+
} // namespace google
|
739 |
+
#endif // GOOGLE_PROTOBUF_TYPE_HANDLER_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h
ADDED
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Authors: [email protected] (Wink Saville),
|
32 |
+
// [email protected] (Kenton Varda)
|
33 |
+
// Based on original Protocol Buffers design by
|
34 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
35 |
+
//
|
36 |
+
// Defines MessageLite, the abstract interface implemented by all (lite
|
37 |
+
// and non-lite) protocol message objects.
|
38 |
+
|
39 |
+
#ifndef GOOGLE_PROTOBUF_MESSAGE_LITE_H__
|
40 |
+
#define GOOGLE_PROTOBUF_MESSAGE_LITE_H__
|
41 |
+
|
42 |
+
#include <climits>
|
43 |
+
#include <google/protobuf/stubs/common.h>
|
44 |
+
#include <google/protobuf/stubs/logging.h>
|
45 |
+
#include <google/protobuf/stubs/once.h>
|
46 |
+
#include <google/protobuf/arena.h>
|
47 |
+
#include <google/protobuf/stubs/port.h>
|
48 |
+
|
49 |
+
namespace google {
|
50 |
+
namespace protobuf {
|
51 |
+
template <typename T>
|
52 |
+
class RepeatedPtrField;
|
53 |
+
namespace io {
|
54 |
+
class CodedInputStream;
|
55 |
+
class CodedOutputStream;
|
56 |
+
class ZeroCopyInputStream;
|
57 |
+
class ZeroCopyOutputStream;
|
58 |
+
}
|
59 |
+
namespace internal {
|
60 |
+
|
61 |
+
class RepeatedPtrFieldBase;
|
62 |
+
class WireFormatLite;
|
63 |
+
class WeakFieldMap;
|
64 |
+
|
65 |
+
#ifndef SWIG
|
66 |
+
// We compute sizes as size_t but cache them as int. This function converts a
|
67 |
+
// computed size to a cached size. Since we don't proceed with serialization
|
68 |
+
// if the total size was > INT_MAX, it is not important what this function
|
69 |
+
// returns for inputs > INT_MAX. However this case should not error or
|
70 |
+
// GOOGLE_CHECK-fail, because the full size_t resolution is still returned from
|
71 |
+
// ByteSizeLong() and checked against INT_MAX; we can catch the overflow
|
72 |
+
// there.
|
73 |
+
inline int ToCachedSize(size_t size) { return static_cast<int>(size); }
|
74 |
+
|
75 |
+
// We mainly calculate sizes in terms of size_t, but some functions that
|
76 |
+
// compute sizes return "int". These int sizes are expected to always be
|
77 |
+
// positive. This function is more efficient than casting an int to size_t
|
78 |
+
// directly on 64-bit platforms because it avoids making the compiler emit a
|
79 |
+
// sign extending instruction, which we don't want and don't want to pay for.
|
80 |
+
inline size_t FromIntSize(int size) {
|
81 |
+
// Convert to unsigned before widening so sign extension is not necessary.
|
82 |
+
return static_cast<unsigned int>(size);
|
83 |
+
}
|
84 |
+
|
85 |
+
// For cases where a legacy function returns an integer size. We GOOGLE_DCHECK()
|
86 |
+
// that the conversion will fit within an integer; if this is false then we
|
87 |
+
// are losing information.
|
88 |
+
inline int ToIntSize(size_t size) {
|
89 |
+
GOOGLE_DCHECK_LE(size, static_cast<size_t>(INT_MAX));
|
90 |
+
return static_cast<int>(size);
|
91 |
+
}
|
92 |
+
|
93 |
+
// This type wraps a variable whose constructor and destructor are explicitly
|
94 |
+
// called. It is particularly useful for a global variable, without its
|
95 |
+
// constructor and destructor run on start and end of the program lifetime.
|
96 |
+
// This circumvents the initial construction order fiasco, while keeping
|
97 |
+
// the address of the empty string a compile time constant.
|
98 |
+
//
|
99 |
+
// Pay special attention to the initialization state of the object.
|
100 |
+
// 1. The object is "uninitialized" to begin with.
|
101 |
+
// 2. Call DefaultConstruct() only if the object is uninitialized.
|
102 |
+
// After the call, the object becomes "initialized".
|
103 |
+
// 3. Call get() and get_mutable() only if the object is initialized.
|
104 |
+
// 4. Call Destruct() only if the object is initialized.
|
105 |
+
// After the call, the object becomes uninitialized.
|
106 |
+
template <typename T>
|
107 |
+
class ExplicitlyConstructed {
|
108 |
+
public:
|
109 |
+
void DefaultConstruct() {
|
110 |
+
new (&union_) T();
|
111 |
+
}
|
112 |
+
|
113 |
+
void Destruct() {
|
114 |
+
get_mutable()->~T();
|
115 |
+
}
|
116 |
+
|
117 |
+
constexpr const T& get() const { return reinterpret_cast<const T&>(union_); }
|
118 |
+
T* get_mutable() { return reinterpret_cast<T*>(&union_); }
|
119 |
+
|
120 |
+
private:
|
121 |
+
// Prefer c++14 aligned_storage, but for compatibility this will do.
|
122 |
+
union AlignedUnion {
|
123 |
+
char space[sizeof(T)];
|
124 |
+
int64 align_to_int64;
|
125 |
+
void* align_to_ptr;
|
126 |
+
} union_;
|
127 |
+
};
|
128 |
+
|
129 |
+
// Default empty string object. Don't use this directly. Instead, call
|
130 |
+
// GetEmptyString() to get the reference.
|
131 |
+
LIBPROTOBUF_EXPORT extern ExplicitlyConstructed<::std::string> fixed_address_empty_string;
|
132 |
+
|
133 |
+
LIBPROTOBUF_EXPORT inline const ::std::string& GetEmptyStringAlreadyInited() {
|
134 |
+
return fixed_address_empty_string.get();
|
135 |
+
}
|
136 |
+
|
137 |
+
LIBPROTOBUF_EXPORT size_t StringSpaceUsedExcludingSelfLong(const string& str);
|
138 |
+
#endif // SWIG
|
139 |
+
} // namespace internal
|
140 |
+
|
141 |
+
// Interface to light weight protocol messages.
|
142 |
+
//
|
143 |
+
// This interface is implemented by all protocol message objects. Non-lite
|
144 |
+
// messages additionally implement the Message interface, which is a
|
145 |
+
// subclass of MessageLite. Use MessageLite instead when you only need
|
146 |
+
// the subset of features which it supports -- namely, nothing that uses
|
147 |
+
// descriptors or reflection. You can instruct the protocol compiler
|
148 |
+
// to generate classes which implement only MessageLite, not the full
|
149 |
+
// Message interface, by adding the following line to the .proto file:
|
150 |
+
//
|
151 |
+
// option optimize_for = LITE_RUNTIME;
|
152 |
+
//
|
153 |
+
// This is particularly useful on resource-constrained systems where
|
154 |
+
// the full protocol buffers runtime library is too big.
|
155 |
+
//
|
156 |
+
// Note that on non-constrained systems (e.g. servers) when you need
|
157 |
+
// to link in lots of protocol definitions, a better way to reduce
|
158 |
+
// total code footprint is to use optimize_for = CODE_SIZE. This
|
159 |
+
// will make the generated code smaller while still supporting all the
|
160 |
+
// same features (at the expense of speed). optimize_for = LITE_RUNTIME
|
161 |
+
// is best when you only have a small number of message types linked
|
162 |
+
// into your binary, in which case the size of the protocol buffers
|
163 |
+
// runtime itself is the biggest problem.
|
164 |
+
class LIBPROTOBUF_EXPORT MessageLite {
|
165 |
+
public:
|
166 |
+
inline MessageLite() {}
|
167 |
+
virtual ~MessageLite() {}
|
168 |
+
|
169 |
+
// Basic Operations ------------------------------------------------
|
170 |
+
|
171 |
+
// Get the name of this message type, e.g. "foo.bar.BazProto".
|
172 |
+
virtual string GetTypeName() const = 0;
|
173 |
+
|
174 |
+
// Construct a new instance of the same type. Ownership is passed to the
|
175 |
+
// caller.
|
176 |
+
virtual MessageLite* New() const = 0;
|
177 |
+
|
178 |
+
// Construct a new instance on the arena. Ownership is passed to the caller
|
179 |
+
// if arena is a NULL. Default implementation for backwards compatibility.
|
180 |
+
virtual MessageLite* New(::google::protobuf::Arena* arena) const;
|
181 |
+
|
182 |
+
// Get the arena, if any, associated with this message. Virtual method
|
183 |
+
// required for generic operations but most arena-related operations should
|
184 |
+
// use the GetArenaNoVirtual() generated-code method. Default implementation
|
185 |
+
// to reduce code size by avoiding the need for per-type implementations
|
186 |
+
// when types do not implement arena support.
|
187 |
+
virtual ::google::protobuf::Arena* GetArena() const { return NULL; }
|
188 |
+
|
189 |
+
// Get a pointer that may be equal to this message's arena, or may not be.
|
190 |
+
// If the value returned by this method is equal to some arena pointer, then
|
191 |
+
// this message is on that arena; however, if this message is on some arena,
|
192 |
+
// this method may or may not return that arena's pointer. As a tradeoff,
|
193 |
+
// this method may be more efficient than GetArena(). The intent is to allow
|
194 |
+
// underlying representations that use e.g. tagged pointers to sometimes
|
195 |
+
// store the arena pointer directly, and sometimes in a more indirect way,
|
196 |
+
// and allow a fastpath comparison against the arena pointer when it's easy
|
197 |
+
// to obtain.
|
198 |
+
virtual void* GetMaybeArenaPointer() const { return GetArena(); }
|
199 |
+
|
200 |
+
// Clear all fields of the message and set them to their default values.
|
201 |
+
// Clear() avoids freeing memory, assuming that any memory allocated
|
202 |
+
// to hold parts of the message will be needed again to hold the next
|
203 |
+
// message. If you actually want to free the memory used by a Message,
|
204 |
+
// you must delete it.
|
205 |
+
virtual void Clear() = 0;
|
206 |
+
|
207 |
+
// Quickly check if all required fields have values set.
|
208 |
+
virtual bool IsInitialized() const = 0;
|
209 |
+
|
210 |
+
// This is not implemented for Lite messages -- it just returns "(cannot
|
211 |
+
// determine missing fields for lite message)". However, it is implemented
|
212 |
+
// for full messages. See message.h.
|
213 |
+
virtual string InitializationErrorString() const;
|
214 |
+
|
215 |
+
// If |other| is the exact same class as this, calls MergeFrom(). Otherwise,
|
216 |
+
// results are undefined (probably crash).
|
217 |
+
virtual void CheckTypeAndMergeFrom(const MessageLite& other) = 0;
|
218 |
+
|
219 |
+
// Parsing ---------------------------------------------------------
|
220 |
+
// Methods for parsing in protocol buffer format. Most of these are
|
221 |
+
// just simple wrappers around MergeFromCodedStream(). Clear() will be
|
222 |
+
// called before merging the input.
|
223 |
+
|
224 |
+
// Fill the message with a protocol buffer parsed from the given input
|
225 |
+
// stream. Returns false on a read error or if the input is in the wrong
|
226 |
+
// format. A successful return does not indicate the entire input is
|
227 |
+
// consumed, ensure you call ConsumedEntireMessage() to check that if
|
228 |
+
// applicable.
|
229 |
+
bool ParseFromCodedStream(io::CodedInputStream* input);
|
230 |
+
// Like ParseFromCodedStream(), but accepts messages that are missing
|
231 |
+
// required fields.
|
232 |
+
bool ParsePartialFromCodedStream(io::CodedInputStream* input);
|
233 |
+
// Read a protocol buffer from the given zero-copy input stream. If
|
234 |
+
// successful, the entire input will be consumed.
|
235 |
+
bool ParseFromZeroCopyStream(io::ZeroCopyInputStream* input);
|
236 |
+
// Like ParseFromZeroCopyStream(), but accepts messages that are missing
|
237 |
+
// required fields.
|
238 |
+
bool ParsePartialFromZeroCopyStream(io::ZeroCopyInputStream* input);
|
239 |
+
// Read a protocol buffer from the given zero-copy input stream, expecting
|
240 |
+
// the message to be exactly "size" bytes long. If successful, exactly
|
241 |
+
// this many bytes will have been consumed from the input.
|
242 |
+
bool ParseFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input, int size);
|
243 |
+
// Like ParseFromBoundedZeroCopyStream(), but accepts messages that are
|
244 |
+
// missing required fields.
|
245 |
+
bool ParsePartialFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input,
|
246 |
+
int size);
|
247 |
+
// Parses a protocol buffer contained in a string. Returns true on success.
|
248 |
+
// This function takes a string in the (non-human-readable) binary wire
|
249 |
+
// format, matching the encoding output by MessageLite::SerializeToString().
|
250 |
+
// If you'd like to convert a human-readable string into a protocol buffer
|
251 |
+
// object, see google::protobuf::TextFormat::ParseFromString().
|
252 |
+
bool ParseFromString(const string& data);
|
253 |
+
// Like ParseFromString(), but accepts messages that are missing
|
254 |
+
// required fields.
|
255 |
+
bool ParsePartialFromString(const string& data);
|
256 |
+
// Parse a protocol buffer contained in an array of bytes.
|
257 |
+
bool ParseFromArray(const void* data, int size);
|
258 |
+
// Like ParseFromArray(), but accepts messages that are missing
|
259 |
+
// required fields.
|
260 |
+
bool ParsePartialFromArray(const void* data, int size);
|
261 |
+
|
262 |
+
|
263 |
+
// Reads a protocol buffer from the stream and merges it into this
|
264 |
+
// Message. Singular fields read from the what is
|
265 |
+
// already in the Message and repeated fields are appended to those
|
266 |
+
// already present.
|
267 |
+
//
|
268 |
+
// It is the responsibility of the caller to call input->LastTagWas()
|
269 |
+
// (for groups) or input->ConsumedEntireMessage() (for non-groups) after
|
270 |
+
// this returns to verify that the message's end was delimited correctly.
|
271 |
+
//
|
272 |
+
// ParsefromCodedStream() is implemented as Clear() followed by
|
273 |
+
// MergeFromCodedStream().
|
274 |
+
bool MergeFromCodedStream(io::CodedInputStream* input);
|
275 |
+
|
276 |
+
// Like MergeFromCodedStream(), but succeeds even if required fields are
|
277 |
+
// missing in the input.
|
278 |
+
//
|
279 |
+
// MergeFromCodedStream() is just implemented as MergePartialFromCodedStream()
|
280 |
+
// followed by IsInitialized().
|
281 |
+
virtual bool MergePartialFromCodedStream(io::CodedInputStream* input) = 0;
|
282 |
+
|
283 |
+
|
284 |
+
// Serialization ---------------------------------------------------
|
285 |
+
// Methods for serializing in protocol buffer format. Most of these
|
286 |
+
// are just simple wrappers around ByteSize() and SerializeWithCachedSizes().
|
287 |
+
|
288 |
+
// Write a protocol buffer of this message to the given output. Returns
|
289 |
+
// false on a write error. If the message is missing required fields,
|
290 |
+
// this may GOOGLE_CHECK-fail.
|
291 |
+
bool SerializeToCodedStream(io::CodedOutputStream* output) const;
|
292 |
+
// Like SerializeToCodedStream(), but allows missing required fields.
|
293 |
+
bool SerializePartialToCodedStream(io::CodedOutputStream* output) const;
|
294 |
+
// Write the message to the given zero-copy output stream. All required
|
295 |
+
// fields must be set.
|
296 |
+
bool SerializeToZeroCopyStream(io::ZeroCopyOutputStream* output) const;
|
297 |
+
// Like SerializeToZeroCopyStream(), but allows missing required fields.
|
298 |
+
bool SerializePartialToZeroCopyStream(io::ZeroCopyOutputStream* output) const;
|
299 |
+
// Serialize the message and store it in the given string. All required
|
300 |
+
// fields must be set.
|
301 |
+
bool SerializeToString(string* output) const;
|
302 |
+
// Like SerializeToString(), but allows missing required fields.
|
303 |
+
bool SerializePartialToString(string* output) const;
|
304 |
+
// Serialize the message and store it in the given byte array. All required
|
305 |
+
// fields must be set.
|
306 |
+
bool SerializeToArray(void* data, int size) const;
|
307 |
+
// Like SerializeToArray(), but allows missing required fields.
|
308 |
+
bool SerializePartialToArray(void* data, int size) const;
|
309 |
+
|
310 |
+
// Make a string encoding the message. Is equivalent to calling
|
311 |
+
// SerializeToString() on a string and using that. Returns the empty
|
312 |
+
// string if SerializeToString() would have returned an error.
|
313 |
+
// Note: If you intend to generate many such strings, you may
|
314 |
+
// reduce heap fragmentation by instead re-using the same string
|
315 |
+
// object with calls to SerializeToString().
|
316 |
+
string SerializeAsString() const;
|
317 |
+
// Like SerializeAsString(), but allows missing required fields.
|
318 |
+
string SerializePartialAsString() const;
|
319 |
+
|
320 |
+
// Like SerializeToString(), but appends to the data to the string's existing
|
321 |
+
// contents. All required fields must be set.
|
322 |
+
bool AppendToString(string* output) const;
|
323 |
+
// Like AppendToString(), but allows missing required fields.
|
324 |
+
bool AppendPartialToString(string* output) const;
|
325 |
+
|
326 |
+
// Computes the serialized size of the message. This recursively calls
|
327 |
+
// ByteSizeLong() on all embedded messages.
|
328 |
+
//
|
329 |
+
// ByteSizeLong() is generally linear in the number of fields defined for the
|
330 |
+
// proto.
|
331 |
+
virtual size_t ByteSizeLong() const = 0;
|
332 |
+
|
333 |
+
// Legacy ByteSize() API.
|
334 |
+
PROTOBUF_RUNTIME_DEPRECATED("Please use ByteSizeLong() instead")
|
335 |
+
int ByteSize() const {
|
336 |
+
return internal::ToIntSize(ByteSizeLong());
|
337 |
+
}
|
338 |
+
|
339 |
+
// Serializes the message without recomputing the size. The message must not
|
340 |
+
// have changed since the last call to ByteSize(), and the value returned by
|
341 |
+
// ByteSize must be non-negative. Otherwise the results are undefined.
|
342 |
+
virtual void SerializeWithCachedSizes(
|
343 |
+
io::CodedOutputStream* output) const;
|
344 |
+
|
345 |
+
// Functions below here are not part of the public interface. It isn't
|
346 |
+
// enforced, but they should be treated as private, and will be private
|
347 |
+
// at some future time. Unfortunately the implementation of the "friend"
|
348 |
+
// keyword in GCC is broken at the moment, but we expect it will be fixed.
|
349 |
+
|
350 |
+
// Like SerializeWithCachedSizes, but writes directly to *target, returning
|
351 |
+
// a pointer to the byte immediately after the last byte written. "target"
|
352 |
+
// must point at a byte array of at least ByteSize() bytes. Whether to use
|
353 |
+
// deterministic serialization, e.g., maps in sorted order, is determined by
|
354 |
+
// CodedOutputStream::IsDefaultSerializationDeterministic().
|
355 |
+
virtual uint8* SerializeWithCachedSizesToArray(uint8* target) const;
|
356 |
+
|
357 |
+
// Returns the result of the last call to ByteSize(). An embedded message's
|
358 |
+
// size is needed both to serialize it (because embedded messages are
|
359 |
+
// length-delimited) and to compute the outer message's size. Caching
|
360 |
+
// the size avoids computing it multiple times.
|
361 |
+
//
|
362 |
+
// ByteSize() does not automatically use the cached size when available
|
363 |
+
// because this would require invalidating it every time the message was
|
364 |
+
// modified, which would be too hard and expensive. (E.g. if a deeply-nested
|
365 |
+
// sub-message is changed, all of its parents' cached sizes would need to be
|
366 |
+
// invalidated, which is too much work for an otherwise inlined setter
|
367 |
+
// method.)
|
368 |
+
virtual int GetCachedSize() const = 0;
|
369 |
+
|
370 |
+
virtual uint8* InternalSerializeWithCachedSizesToArray(bool deterministic,
|
371 |
+
uint8* target) const;
|
372 |
+
|
373 |
+
protected:
|
374 |
+
// CastToBase allows generated code to cast a RepeatedPtrField<T> to
|
375 |
+
// RepeatedPtrFieldBase. We try to restrict access to RepeatedPtrFieldBase
|
376 |
+
// because it is an implementation detail that user code should not access
|
377 |
+
// directly.
|
378 |
+
template <typename T>
|
379 |
+
static ::google::protobuf::internal::RepeatedPtrFieldBase* CastToBase(
|
380 |
+
::google::protobuf::RepeatedPtrField<T>* repeated) {
|
381 |
+
return repeated;
|
382 |
+
}
|
383 |
+
template <typename T>
|
384 |
+
static const ::google::protobuf::internal::RepeatedPtrFieldBase& CastToBase(
|
385 |
+
const ::google::protobuf::RepeatedPtrField<T>& repeated) {
|
386 |
+
return repeated;
|
387 |
+
}
|
388 |
+
|
389 |
+
template <typename T>
|
390 |
+
static T* CreateMaybeMessage(Arena* arena) {
|
391 |
+
return Arena::CreateMaybeMessage<T>(arena);
|
392 |
+
}
|
393 |
+
|
394 |
+
private:
|
395 |
+
// TODO(gerbens) make this a pure abstract function
|
396 |
+
virtual const void* InternalGetTable() const { return NULL; }
|
397 |
+
|
398 |
+
friend class internal::WireFormatLite;
|
399 |
+
friend class Message;
|
400 |
+
friend class internal::WeakFieldMap;
|
401 |
+
|
402 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MessageLite);
|
403 |
+
};
|
404 |
+
|
405 |
+
namespace internal {
|
406 |
+
|
407 |
+
extern bool LIBPROTOBUF_EXPORT proto3_preserve_unknown_;
|
408 |
+
|
409 |
+
// DO NOT USE: For migration only. Will be removed when Proto3 defaults to
|
410 |
+
// preserve unknowns.
|
411 |
+
inline bool GetProto3PreserveUnknownsDefault() {
|
412 |
+
return proto3_preserve_unknown_;
|
413 |
+
}
|
414 |
+
|
415 |
+
// DO NOT USE: For migration only. Will be removed when Proto3 defaults to
|
416 |
+
// preserve unknowns.
|
417 |
+
void LIBPROTOBUF_EXPORT SetProto3PreserveUnknownsDefault(bool preserve);
|
418 |
+
} // namespace internal
|
419 |
+
|
420 |
+
|
421 |
+
} // namespace protobuf
|
422 |
+
|
423 |
+
} // namespace google
|
424 |
+
#endif // GOOGLE_PROTOBUF_MESSAGE_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/metadata_lite.h
ADDED
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_METADATA_LITE_H__
|
32 |
+
#define GOOGLE_PROTOBUF_METADATA_LITE_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/stubs/common.h>
|
35 |
+
#include <google/protobuf/arena.h>
|
36 |
+
#include <google/protobuf/message_lite.h>
|
37 |
+
#include <google/protobuf/stubs/port.h>
|
38 |
+
|
39 |
+
namespace google {
|
40 |
+
namespace protobuf {
|
41 |
+
namespace internal {
|
42 |
+
|
43 |
+
// This is the representation for messages that support arena allocation. It
|
44 |
+
// uses a tagged pointer to either store the Arena pointer, if there are no
|
45 |
+
// unknown fields, or a pointer to a block of memory with both the Arena pointer
|
46 |
+
// and the UnknownFieldSet, if there are unknown fields. This optimization
|
47 |
+
// allows for "zero-overhead" storage of the Arena pointer, relative to the
|
48 |
+
// above baseline implementation.
|
49 |
+
//
|
50 |
+
// The tagged pointer uses the LSB to disambiguate cases, and uses bit 0 == 0 to
|
51 |
+
// indicate an arena pointer and bit 0 == 1 to indicate a UFS+Arena-container
|
52 |
+
// pointer.
|
53 |
+
template <class T, class Derived>
|
54 |
+
class InternalMetadataWithArenaBase {
|
55 |
+
public:
|
56 |
+
InternalMetadataWithArenaBase() : ptr_(NULL) {}
|
57 |
+
explicit InternalMetadataWithArenaBase(Arena* arena) : ptr_(arena) {}
|
58 |
+
|
59 |
+
~InternalMetadataWithArenaBase() {
|
60 |
+
if (have_unknown_fields() && arena() == NULL) {
|
61 |
+
delete PtrValue<Container>();
|
62 |
+
}
|
63 |
+
ptr_ = NULL;
|
64 |
+
}
|
65 |
+
|
66 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE const T& unknown_fields() const {
|
67 |
+
if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) {
|
68 |
+
return PtrValue<Container>()->unknown_fields;
|
69 |
+
} else {
|
70 |
+
return Derived::default_instance();
|
71 |
+
}
|
72 |
+
}
|
73 |
+
|
74 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE T* mutable_unknown_fields() {
|
75 |
+
if (GOOGLE_PREDICT_TRUE(have_unknown_fields())) {
|
76 |
+
return &PtrValue<Container>()->unknown_fields;
|
77 |
+
} else {
|
78 |
+
return mutable_unknown_fields_slow();
|
79 |
+
}
|
80 |
+
}
|
81 |
+
|
82 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE Arena* arena() const {
|
83 |
+
if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) {
|
84 |
+
return PtrValue<Container>()->arena;
|
85 |
+
} else {
|
86 |
+
return PtrValue<Arena>();
|
87 |
+
}
|
88 |
+
}
|
89 |
+
|
90 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE bool have_unknown_fields() const {
|
91 |
+
return PtrTag() == kTagContainer;
|
92 |
+
}
|
93 |
+
|
94 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Swap(Derived* other) {
|
95 |
+
// Semantics here are that we swap only the unknown fields, not the arena
|
96 |
+
// pointer. We cannot simply swap ptr_ with other->ptr_ because we need to
|
97 |
+
// maintain our own arena ptr. Also, our ptr_ and other's ptr_ may be in
|
98 |
+
// different states (direct arena pointer vs. container with UFS) so we
|
99 |
+
// cannot simply swap ptr_ and then restore the arena pointers. We reuse
|
100 |
+
// UFS's swap implementation instead.
|
101 |
+
if (have_unknown_fields() || other->have_unknown_fields()) {
|
102 |
+
static_cast<Derived*>(this)->DoSwap(other->mutable_unknown_fields());
|
103 |
+
}
|
104 |
+
}
|
105 |
+
|
106 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void MergeFrom(const Derived& other) {
|
107 |
+
if (other.have_unknown_fields()) {
|
108 |
+
static_cast<Derived*>(this)->DoMergeFrom(other.unknown_fields());
|
109 |
+
}
|
110 |
+
}
|
111 |
+
|
112 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void Clear() {
|
113 |
+
if (have_unknown_fields()) {
|
114 |
+
static_cast<Derived*>(this)->DoClear();
|
115 |
+
}
|
116 |
+
}
|
117 |
+
|
118 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE void* raw_arena_ptr() const {
|
119 |
+
return ptr_;
|
120 |
+
}
|
121 |
+
|
122 |
+
private:
|
123 |
+
void* ptr_;
|
124 |
+
|
125 |
+
// Tagged pointer implementation.
|
126 |
+
enum {
|
127 |
+
// ptr_ is an Arena*.
|
128 |
+
kTagArena = 0,
|
129 |
+
// ptr_ is a Container*.
|
130 |
+
kTagContainer = 1,
|
131 |
+
};
|
132 |
+
static const intptr_t kPtrTagMask = 1;
|
133 |
+
static const intptr_t kPtrValueMask = ~kPtrTagMask;
|
134 |
+
|
135 |
+
// Accessors for pointer tag and pointer value.
|
136 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE int PtrTag() const {
|
137 |
+
return reinterpret_cast<intptr_t>(ptr_) & kPtrTagMask;
|
138 |
+
}
|
139 |
+
|
140 |
+
template<typename U> U* PtrValue() const {
|
141 |
+
return reinterpret_cast<U*>(
|
142 |
+
reinterpret_cast<intptr_t>(ptr_) & kPtrValueMask);
|
143 |
+
}
|
144 |
+
|
145 |
+
// If ptr_'s tag is kTagContainer, it points to an instance of this struct.
|
146 |
+
struct Container {
|
147 |
+
T unknown_fields;
|
148 |
+
Arena* arena;
|
149 |
+
};
|
150 |
+
|
151 |
+
GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE T* mutable_unknown_fields_slow() {
|
152 |
+
Arena* my_arena = arena();
|
153 |
+
Container* container = Arena::Create<Container>(my_arena);
|
154 |
+
// Two-step assignment works around a bug in clang's static analyzer:
|
155 |
+
// https://bugs.llvm.org/show_bug.cgi?id=34198.
|
156 |
+
ptr_ = container;
|
157 |
+
ptr_ = reinterpret_cast<void*>(
|
158 |
+
reinterpret_cast<intptr_t>(ptr_) | kTagContainer);
|
159 |
+
container->arena = my_arena;
|
160 |
+
return &(container->unknown_fields);
|
161 |
+
}
|
162 |
+
};
|
163 |
+
|
164 |
+
// We store unknown fields as a string right now, because there is currently no
|
165 |
+
// good interface for reading unknown fields into an ArenaString. We may want
|
166 |
+
// to revisit this to allow unknown fields to be parsed onto the Arena.
|
167 |
+
class InternalMetadataWithArenaLite
|
168 |
+
: public InternalMetadataWithArenaBase<string,
|
169 |
+
InternalMetadataWithArenaLite> {
|
170 |
+
public:
|
171 |
+
InternalMetadataWithArenaLite() {}
|
172 |
+
|
173 |
+
explicit InternalMetadataWithArenaLite(Arena* arena)
|
174 |
+
: InternalMetadataWithArenaBase<string,
|
175 |
+
InternalMetadataWithArenaLite>(arena) {}
|
176 |
+
|
177 |
+
void DoSwap(string* other) {
|
178 |
+
mutable_unknown_fields()->swap(*other);
|
179 |
+
}
|
180 |
+
|
181 |
+
void DoMergeFrom(const string& other) {
|
182 |
+
mutable_unknown_fields()->append(other);
|
183 |
+
}
|
184 |
+
|
185 |
+
void DoClear() {
|
186 |
+
mutable_unknown_fields()->clear();
|
187 |
+
}
|
188 |
+
|
189 |
+
static const string& default_instance() {
|
190 |
+
return GetEmptyStringAlreadyInited();
|
191 |
+
}
|
192 |
+
};
|
193 |
+
|
194 |
+
// This helper RAII class is needed to efficiently parse unknown fields. We
|
195 |
+
// should only call mutable_unknown_fields if there are actual unknown fields.
|
196 |
+
// The obvious thing to just use a stack string and swap it at the end of the
|
197 |
+
// parse won't work, because the destructor of StringOutputStream needs to be
|
198 |
+
// called before we can modify the string (it check-fails). Using
|
199 |
+
// LiteUnknownFieldSetter setter(&_internal_metadata_);
|
200 |
+
// StringOutputStream stream(setter.buffer());
|
201 |
+
// guarantees that the string is only swapped after stream is destroyed.
|
202 |
+
class LIBPROTOBUF_EXPORT LiteUnknownFieldSetter {
|
203 |
+
public:
|
204 |
+
explicit LiteUnknownFieldSetter(InternalMetadataWithArenaLite* metadata)
|
205 |
+
: metadata_(metadata) {
|
206 |
+
if (metadata->have_unknown_fields()) {
|
207 |
+
buffer_.swap(*metadata->mutable_unknown_fields());
|
208 |
+
}
|
209 |
+
}
|
210 |
+
~LiteUnknownFieldSetter() {
|
211 |
+
if (!buffer_.empty()) metadata_->mutable_unknown_fields()->swap(buffer_);
|
212 |
+
}
|
213 |
+
string* buffer() { return &buffer_; }
|
214 |
+
|
215 |
+
private:
|
216 |
+
InternalMetadataWithArenaLite* metadata_;
|
217 |
+
string buffer_;
|
218 |
+
};
|
219 |
+
|
220 |
+
} // namespace internal
|
221 |
+
} // namespace protobuf
|
222 |
+
|
223 |
+
} // namespace google
|
224 |
+
#endif // GOOGLE_PROTOBUF_METADATA_LITE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/repeated_field.h
ADDED
@@ -0,0 +1,2630 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
// Based on original Protocol Buffers design by
|
33 |
+
// Sanjay Ghemawat, Jeff Dean, and others.
|
34 |
+
//
|
35 |
+
// RepeatedField and RepeatedPtrField are used by generated protocol message
|
36 |
+
// classes to manipulate repeated fields. These classes are very similar to
|
37 |
+
// STL's vector, but include a number of optimizations found to be useful
|
38 |
+
// specifically in the case of Protocol Buffers. RepeatedPtrField is
|
39 |
+
// particularly different from STL vector as it manages ownership of the
|
40 |
+
// pointers that it contains.
|
41 |
+
//
|
42 |
+
// Typically, clients should not need to access RepeatedField objects directly,
|
43 |
+
// but should instead use the accessor functions generated automatically by the
|
44 |
+
// protocol compiler.
|
45 |
+
|
46 |
+
#ifndef GOOGLE_PROTOBUF_REPEATED_FIELD_H__
|
47 |
+
#define GOOGLE_PROTOBUF_REPEATED_FIELD_H__
|
48 |
+
|
49 |
+
#ifdef _MSC_VER
|
50 |
+
// This is required for min/max on VS2013 only.
|
51 |
+
#include <algorithm>
|
52 |
+
#endif
|
53 |
+
|
54 |
+
#include <iterator>
|
55 |
+
#include <limits>
|
56 |
+
#include <string>
|
57 |
+
#include <google/protobuf/stubs/casts.h>
|
58 |
+
#include <google/protobuf/stubs/logging.h>
|
59 |
+
#include <google/protobuf/stubs/common.h>
|
60 |
+
#include <google/protobuf/arena.h>
|
61 |
+
#include <google/protobuf/implicit_weak_message.h>
|
62 |
+
#include <google/protobuf/message_lite.h>
|
63 |
+
#include <google/protobuf/stubs/port.h>
|
64 |
+
#include <type_traits>
|
65 |
+
|
66 |
+
|
67 |
+
// Forward-declare these so that we can make them friends.
|
68 |
+
namespace google {
|
69 |
+
namespace upb {
|
70 |
+
namespace google_opensource {
|
71 |
+
class GMR_Handlers;
|
72 |
+
} // namespace google_opensource
|
73 |
+
} // namespace upb
|
74 |
+
|
75 |
+
namespace protobuf {
|
76 |
+
|
77 |
+
class Message;
|
78 |
+
|
79 |
+
namespace internal {
|
80 |
+
|
81 |
+
class MergePartialFromCodedStreamHelper;
|
82 |
+
|
83 |
+
static const int kMinRepeatedFieldAllocationSize = 4;
|
84 |
+
|
85 |
+
// A utility function for logging that doesn't need any template types.
|
86 |
+
void LogIndexOutOfBounds(int index, int size);
|
87 |
+
|
88 |
+
// Computes how many elements an insertion from [first, last) will add.
// Forward (and stronger) iterators may be traversed more than once, so the
// exact count is available up front via std::distance.
template <typename Iter>
inline int CalculateReserve(Iter first, Iter last, std::forward_iterator_tag) {
  return static_cast<int>(std::distance(first, last));
}

// Single-pass input iterators cannot be measured without consuming them;
// -1 signals to the caller that the size is unknown.
template <typename Iter>
inline int CalculateReserve(Iter /*first*/, Iter /*last*/,
                            std::input_iterator_tag /*unused*/) {
  return -1;
}

// Entry point: dispatches to one of the overloads above based on the
// iterator category of Iter.
template <typename Iter>
inline int CalculateReserve(Iter first, Iter last) {
  return CalculateReserve(
      first, last, typename std::iterator_traits<Iter>::iterator_category());
}
|
104 |
+
} // namespace internal
|
105 |
+
|
106 |
+
|
107 |
+
// RepeatedField is used to represent repeated fields of a primitive type (in
|
108 |
+
// other words, everything except strings and nested Messages). Most users will
|
109 |
+
// not ever use a RepeatedField directly; they will use the get-by-index,
|
110 |
+
// set-by-index, and add accessors that are generated for all repeated fields.
|
111 |
+
template <typename Element>
|
112 |
+
class RepeatedField final {
|
113 |
+
public:
|
114 |
+
RepeatedField();
|
115 |
+
explicit RepeatedField(Arena* arena);
|
116 |
+
RepeatedField(const RepeatedField& other);
|
117 |
+
template <typename Iter>
|
118 |
+
RepeatedField(Iter begin, const Iter& end);
|
119 |
+
~RepeatedField();
|
120 |
+
|
121 |
+
RepeatedField& operator=(const RepeatedField& other);
|
122 |
+
|
123 |
+
RepeatedField(RepeatedField&& other) noexcept;
|
124 |
+
RepeatedField& operator=(RepeatedField&& other) noexcept;
|
125 |
+
|
126 |
+
bool empty() const;
|
127 |
+
int size() const;
|
128 |
+
|
129 |
+
const Element& Get(int index) const;
|
130 |
+
Element* Mutable(int index);
|
131 |
+
|
132 |
+
const Element& operator[](int index) const { return Get(index); }
|
133 |
+
Element& operator[](int index) { return *Mutable(index); }
|
134 |
+
|
135 |
+
void Set(int index, const Element& value);
|
136 |
+
void Add(const Element& value);
|
137 |
+
// Appends a new element and return a pointer to it.
|
138 |
+
// The new element is uninitialized if |Element| is a POD type.
|
139 |
+
Element* Add();
|
140 |
+
// Remove the last element in the array.
|
141 |
+
void RemoveLast();
|
142 |
+
|
143 |
+
// Extract elements with indices in "[start .. start+num-1]".
|
144 |
+
// Copy them into "elements[0 .. num-1]" if "elements" is not NULL.
|
145 |
+
// Caution: implementation also moves elements with indices [start+num ..].
|
146 |
+
// Calling this routine inside a loop can cause quadratic behavior.
|
147 |
+
void ExtractSubrange(int start, int num, Element* elements);
|
148 |
+
|
149 |
+
void Clear();
|
150 |
+
void MergeFrom(const RepeatedField& other);
|
151 |
+
void CopyFrom(const RepeatedField& other);
|
152 |
+
|
153 |
+
// Reserve space to expand the field to at least the given size. If the
|
154 |
+
// array is grown, it will always be at least doubled in size.
|
155 |
+
void Reserve(int new_size);
|
156 |
+
|
157 |
+
// Resize the RepeatedField to a new, smaller size. This is O(1).
|
158 |
+
void Truncate(int new_size);
|
159 |
+
|
160 |
+
void AddAlreadyReserved(const Element& value);
|
161 |
+
// Appends a new element and return a pointer to it.
|
162 |
+
// The new element is uninitialized if |Element| is a POD type.
|
163 |
+
// Should be called only if Capacity() > Size().
|
164 |
+
Element* AddAlreadyReserved();
|
165 |
+
Element* AddNAlreadyReserved(int elements);
|
166 |
+
int Capacity() const;
|
167 |
+
|
168 |
+
// Like STL resize. Uses value to fill appended elements.
|
169 |
+
// Like Truncate() if new_size <= size(), otherwise this is
|
170 |
+
// O(new_size - size()).
|
171 |
+
void Resize(int new_size, const Element& value);
|
172 |
+
|
173 |
+
// Gets the underlying array. This pointer is possibly invalidated by
|
174 |
+
// any add or remove operation.
|
175 |
+
Element* mutable_data();
|
176 |
+
const Element* data() const;
|
177 |
+
|
178 |
+
// Swap entire contents with "other". If they are separate arenas then, copies
|
179 |
+
// data between each other.
|
180 |
+
void Swap(RepeatedField* other);
|
181 |
+
|
182 |
+
// Swap entire contents with "other". Should be called only if the caller can
|
183 |
+
// guarantee that both repeated fields are on the same arena or are on the
|
184 |
+
// heap. Swapping between different arenas is disallowed and caught by a
|
185 |
+
// GOOGLE_DCHECK (see API docs for details).
|
186 |
+
void UnsafeArenaSwap(RepeatedField* other);
|
187 |
+
|
188 |
+
// Swap two elements.
|
189 |
+
void SwapElements(int index1, int index2);
|
190 |
+
|
191 |
+
// STL-like iterator support
|
192 |
+
typedef Element* iterator;
|
193 |
+
typedef const Element* const_iterator;
|
194 |
+
typedef Element value_type;
|
195 |
+
typedef value_type& reference;
|
196 |
+
typedef const value_type& const_reference;
|
197 |
+
typedef value_type* pointer;
|
198 |
+
typedef const value_type* const_pointer;
|
199 |
+
typedef int size_type;
|
200 |
+
typedef ptrdiff_t difference_type;
|
201 |
+
|
202 |
+
iterator begin();
|
203 |
+
const_iterator begin() const;
|
204 |
+
const_iterator cbegin() const;
|
205 |
+
iterator end();
|
206 |
+
const_iterator end() const;
|
207 |
+
const_iterator cend() const;
|
208 |
+
|
209 |
+
// Reverse iterator support
|
210 |
+
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
|
211 |
+
typedef std::reverse_iterator<iterator> reverse_iterator;
|
212 |
+
reverse_iterator rbegin() {
|
213 |
+
return reverse_iterator(end());
|
214 |
+
}
|
215 |
+
const_reverse_iterator rbegin() const {
|
216 |
+
return const_reverse_iterator(end());
|
217 |
+
}
|
218 |
+
reverse_iterator rend() {
|
219 |
+
return reverse_iterator(begin());
|
220 |
+
}
|
221 |
+
const_reverse_iterator rend() const {
|
222 |
+
return const_reverse_iterator(begin());
|
223 |
+
}
|
224 |
+
|
225 |
+
// Returns the number of bytes used by the repeated field, excluding
|
226 |
+
// sizeof(*this)
|
227 |
+
size_t SpaceUsedExcludingSelfLong() const;
|
228 |
+
|
229 |
+
int SpaceUsedExcludingSelf() const {
|
230 |
+
return internal::ToIntSize(SpaceUsedExcludingSelfLong());
|
231 |
+
}
|
232 |
+
|
233 |
+
// Removes the element referenced by position.
|
234 |
+
//
|
235 |
+
// Returns an iterator to the element immediately following the removed
|
236 |
+
// element.
|
237 |
+
//
|
238 |
+
// Invalidates all iterators at or after the removed element, including end().
|
239 |
+
iterator erase(const_iterator position);
|
240 |
+
|
241 |
+
// Removes the elements in the range [first, last).
|
242 |
+
//
|
243 |
+
// Returns an iterator to the element immediately following the removed range.
|
244 |
+
//
|
245 |
+
// Invalidates all iterators at or after the removed range, including end().
|
246 |
+
iterator erase(const_iterator first, const_iterator last);
|
247 |
+
|
248 |
+
// Get the Arena on which this RepeatedField stores its elements.
|
249 |
+
::google::protobuf::Arena* GetArena() const {
|
250 |
+
return GetArenaNoVirtual();
|
251 |
+
}
|
252 |
+
|
253 |
+
// For internal use only.
|
254 |
+
//
|
255 |
+
// This is public due to it being called by generated code.
|
256 |
+
inline void InternalSwap(RepeatedField* other);
|
257 |
+
|
258 |
+
private:
|
259 |
+
static const int kInitialSize = 0;
|
260 |
+
// A note on the representation here (see also comment below for
|
261 |
+
// RepeatedPtrFieldBase's struct Rep):
|
262 |
+
//
|
263 |
+
// We maintain the same sizeof(RepeatedField) as before we added arena support
|
264 |
+
// so that we do not degrade performance by bloating memory usage. Directly
|
265 |
+
// adding an arena_ element to RepeatedField is quite costly. By using
|
266 |
+
// indirection in this way, we keep the same size when the RepeatedField is
|
267 |
+
// empty (common case), and add only an 8-byte header to the elements array
|
268 |
+
// when non-empty. We make sure to place the size fields directly in the
|
269 |
+
// RepeatedField class to avoid costly cache misses due to the indirection.
|
270 |
+
int current_size_;
|
271 |
+
int total_size_;
|
272 |
+
struct Rep {
|
273 |
+
Arena* arena;
|
274 |
+
Element elements[1];
|
275 |
+
};
|
276 |
+
// We can not use sizeof(Rep) - sizeof(Element) due to the trailing padding on
|
277 |
+
// the struct. We can not use sizeof(Arena*) as well because there might be
|
278 |
+
// a "gap" after the field arena and before the field elements (e.g., when
|
279 |
+
// Element is double and pointer is 32bit).
|
280 |
+
static const size_t kRepHeaderSize;
|
281 |
+
|
282 |
+
// We reuse the Rep* for an Arena* when total_size == 0, to avoid having to do
|
283 |
+
// an allocation in the constructor when we have an Arena.
|
284 |
+
union Pointer {
|
285 |
+
Pointer(Arena* a) : arena(a) {}
|
286 |
+
Arena* arena; // When total_size_ == 0.
|
287 |
+
Rep* rep; // When total_size_ != 0.
|
288 |
+
} ptr_;
|
289 |
+
|
290 |
+
Rep* rep() const {
|
291 |
+
GOOGLE_DCHECK_GT(total_size_, 0);
|
292 |
+
return ptr_.rep;
|
293 |
+
}
|
294 |
+
|
295 |
+
friend class Arena;
|
296 |
+
typedef void InternalArenaConstructable_;
|
297 |
+
|
298 |
+
|
299 |
+
// Move the contents of |from| into |to|, possibly clobbering |from| in the
|
300 |
+
// process. For primitive types this is just a memcpy(), but it could be
|
301 |
+
// specialized for non-primitive types to, say, swap each element instead.
|
302 |
+
void MoveArray(Element* to, Element* from, int size);
|
303 |
+
|
304 |
+
// Copy the elements of |from| into |to|.
|
305 |
+
void CopyArray(Element* to, const Element* from, int size);
|
306 |
+
|
307 |
+
// Internal helper expected by Arena methods.
|
308 |
+
inline Arena* GetArenaNoVirtual() const {
|
309 |
+
return (total_size_ == 0) ? ptr_.arena : ptr_.rep->arena;
|
310 |
+
}
|
311 |
+
|
312 |
+
// Internal helper to delete all elements and deallocate the storage.
|
313 |
+
// If Element has a trivial destructor (for example, if it's a fundamental
|
314 |
+
// type, like int32), the loop will be removed by the optimizer.
|
315 |
+
void InternalDeallocate(Rep* rep, int size) {
|
316 |
+
if (rep != NULL) {
|
317 |
+
Element* e = &rep->elements[0];
|
318 |
+
Element* limit = &rep->elements[size];
|
319 |
+
for (; e < limit; e++) {
|
320 |
+
e->~Element();
|
321 |
+
}
|
322 |
+
if (rep->arena == NULL) {
|
323 |
+
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
|
324 |
+
const size_t bytes = size * sizeof(*e) + kRepHeaderSize;
|
325 |
+
::operator delete(static_cast<void*>(rep), bytes);
|
326 |
+
#else
|
327 |
+
::operator delete(static_cast<void*>(rep));
|
328 |
+
#endif
|
329 |
+
}
|
330 |
+
}
|
331 |
+
}
|
332 |
+
|
333 |
+
friend class internal::WireFormatLite;
|
334 |
+
const Element* unsafe_data() const;
|
335 |
+
};
|
336 |
+
|
337 |
+
// Out-of-line definition of RepeatedField<Element>::kRepHeaderSize.
//
// This computes the byte offset of Rep::elements within Rep (the header word
// plus any alignment padding before the element array) without using
// offsetof(), which is only conditionally supported for non-standard-layout
// element types. The arbitrary 16-byte base address is subtracted back out,
// leaving just the offset.
// NOTE(review): this is the classic offsetof-emulation idiom and relies on
// implementation-defined reinterpret_cast behavior; it is known to work on
// the compilers protobuf supports.
template<typename Element>
const size_t RepeatedField<Element>::kRepHeaderSize =
    reinterpret_cast<size_t>(&reinterpret_cast<Rep*>(16)->elements[0]) - 16;
|
340 |
+
|
341 |
+
namespace internal {
|
342 |
+
template <typename It> class RepeatedPtrIterator;
|
343 |
+
template <typename It, typename VoidPtr> class RepeatedPtrOverPtrsIterator;
|
344 |
+
} // namespace internal
|
345 |
+
|
346 |
+
namespace internal {

// This is a helper template to copy an array of elements efficiently when they
// have a trivial copy constructor, and correctly otherwise. This really
// shouldn't be necessary, but our compiler doesn't optimize std::copy very
// effectively.
//
// When HasTrivialCopy is true, a specialization (defined elsewhere) may copy
// the elements bytewise; the primary template copies element-by-element.
//
// std::is_trivially_copyable is the exact standard precondition for a
// bytewise copy. It replaces the previously used std::is_pod, which is
// deprecated since C++20 and needlessly excluded trivially copyable classes
// that merely have a non-trivial default constructor (for such types the
// bytewise path is equally valid).
template <typename Element,
          bool HasTrivialCopy =
              std::is_trivially_copyable<Element>::value>
struct ElementCopier {
  // Copies |array_size| elements from |from| into |to|.
  void operator()(Element* to, const Element* from, int array_size);
};

}  // namespace internal
|
360 |
+
|
361 |
+
namespace internal {
|
362 |
+
|
363 |
+
// type-traits helper for RepeatedPtrFieldBase: we only want to invoke
|
364 |
+
// arena-related "copy if on different arena" behavior if the necessary methods
|
365 |
+
// exist on the contained type. In particular, we rely on MergeFrom() existing
|
366 |
+
// as a general proxy for the fact that a copy will work, and we also provide a
|
367 |
+
// specific override for string*.
|
368 |
+
// Compile-time probe that detects whether T provides a MergeFrom(const T&)
// member. The nested `type` member resolves to std::true_type when the
// member exists, std::false_type otherwise (classic sizeof-based SFINAE).
template <typename T>
struct TypeImplementsMergeBehaviorProbeForMergeFrom {
  // Distinct-size result types for the sizeof() dispatch below.
  typedef char HasMerge;
  typedef long HasNoMerge;

  // We accept either of:
  //   - void MergeFrom(const T& other)
  //   - bool MergeFrom(const T& other)
  //
  // We mangle these names a bit to avoid compatibility issues in 'unclean'
  // include environments that may have, e.g., "#define test ..." (yes, this
  // exists).
  template<typename U, typename RetType, RetType (U::*)(const U& arg)>
  struct CheckType;
  template<typename U> static HasMerge Check(
      CheckType<U, void, &U::MergeFrom>*);
  template<typename U> static HasMerge Check(
      CheckType<U, bool, &U::MergeFrom>*);
  // Fallback: chosen only when neither CheckType substitution succeeds.
  template<typename U> static HasNoMerge Check(...);

  // Resolves to either std::true_type or std::false_type.
  typedef std::integral_constant<bool,
             (sizeof(Check<T>(0)) == sizeof(HasMerge))> type;
};
|
392 |
+
|
393 |
+
// Trait consumed by RepeatedPtrFieldBase: `type` is std::true_type when the
// contained type supports MergeFrom() (and therefore arena-aware "copy if on
// a different arena" behavior). The second template parameter is a hook for
// further specialization.
template <typename T, typename = void>
struct TypeImplementsMergeBehavior :
    TypeImplementsMergeBehaviorProbeForMergeFrom<T> {};


// std::string has no MergeFrom(), but copying it across arenas is always
// possible, so force the trait to true.
template <>
struct TypeImplementsMergeBehavior< ::std::string> {
  typedef std::true_type type;
};
|
402 |
+
|
403 |
+
// This is the common base class for RepeatedPtrFields. It deals only in void*
// pointers. Users should not use this interface directly.
//
// The methods of this interface correspond to the methods of RepeatedPtrField,
// but may have a template argument called TypeHandler. Its signature is:
//   class TypeHandler {
//    public:
//     typedef MyType Type;
//     // WeakType is almost always the same as MyType, but we use it in
//     // ImplicitWeakTypeHandler.
//     typedef MyType WeakType;
//     static Type* New();
//     static WeakType* NewFromPrototype(const WeakType* prototype,
//                                       ::google::protobuf::Arena* arena);
//     static void Delete(Type*);
//     static void Clear(Type*);
//     static void Merge(const Type& from, Type* to);
//
//     // Only needs to be implemented if SpaceUsedExcludingSelf() is called.
//     static int SpaceUsedLong(const Type&);
//   };
class LIBPROTOBUF_EXPORT RepeatedPtrFieldBase {
 protected:
  RepeatedPtrFieldBase();
  explicit RepeatedPtrFieldBase(::google::protobuf::Arena* arena);
  // Non-virtual destructor: subclasses must call Destroy<TypeHandler>()
  // themselves; the base cannot delete elements without a TypeHandler.
  ~RepeatedPtrFieldBase() {}

  // Must be called from destructor.
  template <typename TypeHandler>
  void Destroy();

  bool empty() const;
  int size() const;

  template <typename TypeHandler>
  typename TypeHandler::Type* Mutable(int index);
  template <typename TypeHandler>
  void Delete(int index);
  template <typename TypeHandler>
  typename TypeHandler::Type* Add(typename TypeHandler::Type* prototype = NULL);

 public:
  // The next few methods are public so that they can be called from generated
  // code when implicit weak fields are used, but they should never be called by
  // application code.

  template <typename TypeHandler>
  const typename TypeHandler::WeakType& Get(int index) const;

  // Creates and adds an element using the given prototype, without introducing
  // a link-time dependency on the concrete message type. This method is used to
  // implement implicit weak fields. The prototype may be NULL, in which case an
  // ImplicitWeakMessage will be used as a placeholder.
  google::protobuf::MessageLite* AddWeak(const google::protobuf::MessageLite* prototype);

  template <typename TypeHandler>
  void Clear();

  template <typename TypeHandler>
  void MergeFrom(const RepeatedPtrFieldBase& other);

  inline void InternalSwap(RepeatedPtrFieldBase* other);

 protected:
  // Move-in overload; participates only when the handler declares its type
  // moveable (SFINAE on TypeHandler::Moveable).
  template <typename TypeHandler>
  void Add(typename TypeHandler::Type&& value,
           std::enable_if<TypeHandler::Moveable>* dummy = NULL);

  template <typename TypeHandler>
  void RemoveLast();
  template <typename TypeHandler>
  void CopyFrom(const RepeatedPtrFieldBase& other);

  void CloseGap(int start, int num);

  void Reserve(int new_size);

  int Capacity() const;

  // Used for constructing iterators.
  void* const* raw_data() const;
  void** raw_mutable_data() const;

  template <typename TypeHandler>
  typename TypeHandler::Type** mutable_data();
  template <typename TypeHandler>
  const typename TypeHandler::Type* const* data() const;

  template <typename TypeHandler> GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
  void Swap(RepeatedPtrFieldBase* other);

  void SwapElements(int index1, int index2);

  template <typename TypeHandler>
  size_t SpaceUsedExcludingSelfLong() const;

  // Advanced memory management --------------------------------------

  // Like Add(), but if there are no cleared objects to use, returns NULL.
  template <typename TypeHandler>
  typename TypeHandler::Type* AddFromCleared();

  // Dispatches on whether the contained type supports MergeFrom() (see
  // TypeImplementsMergeBehavior) so arena-crossing adds can copy when needed.
  template<typename TypeHandler>
  void AddAllocated(typename TypeHandler::Type* value) {
    typename TypeImplementsMergeBehavior<typename TypeHandler::Type>::type t;
    AddAllocatedInternal<TypeHandler>(value, t);
  }

  template <typename TypeHandler>
  void UnsafeArenaAddAllocated(typename TypeHandler::Type* value);

  // Same tag-dispatch pattern as AddAllocated() above.
  template <typename TypeHandler>
  typename TypeHandler::Type* ReleaseLast() {
    typename TypeImplementsMergeBehavior<typename TypeHandler::Type>::type t;
    return ReleaseLastInternal<TypeHandler>(t);
  }

  // Releases last element and returns it, but does not do out-of-arena copy.
  // And just returns the raw pointer to the contained element in the arena.
  template <typename TypeHandler>
  typename TypeHandler::Type* UnsafeArenaReleaseLast();

  int ClearedCount() const;
  template <typename TypeHandler>
  void AddCleared(typename TypeHandler::Type* value);
  template <typename TypeHandler>
  typename TypeHandler::Type* ReleaseCleared();

  // true_type: element type supports MergeFrom(), so a cross-arena add may
  // copy; false_type: it does not, so cross-arena adds are handled (or
  // rejected) without copying.
  template <typename TypeHandler>
  void AddAllocatedInternal(typename TypeHandler::Type* value, std::true_type);
  template <typename TypeHandler>
  void AddAllocatedInternal(typename TypeHandler::Type* value, std::false_type);

  // Out-of-line slow paths for AddAllocated, kept NOINLINE so the fast path
  // stays small.
  template <typename TypeHandler> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
  void AddAllocatedSlowWithCopy(typename TypeHandler::Type* value,
                                Arena* value_arena,
                                Arena* my_arena);
  template <typename TypeHandler> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
  void AddAllocatedSlowWithoutCopy(typename TypeHandler::Type* value);

  template <typename TypeHandler>
  typename TypeHandler::Type* ReleaseLastInternal(std::true_type);
  template <typename TypeHandler>
  typename TypeHandler::Type* ReleaseLastInternal(std::false_type);

  // Slow path for Swap() when the two fields cannot simply exchange pointers.
  template<typename TypeHandler> GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
  void SwapFallback(RepeatedPtrFieldBase* other);

  inline Arena* GetArenaNoVirtual() const {
    return arena_;
  }

 private:
  static const int kInitialSize = 0;
  // A few notes on internal representation:
  //
  // We use an indirected approach, with struct Rep, to keep
  // sizeof(RepeatedPtrFieldBase) equivalent to what it was before arena support
  // was added, namely, 3 8-byte machine words on x86-64. An instance of Rep is
  // allocated only when the repeated field is non-empty, and it is a
  // dynamically-sized struct (the header is directly followed by elements[]).
  // We place arena_ and current_size_ directly in the object to avoid cache
  // misses due to the indirection, because these fields are checked frequently.
  // Placing all fields directly in the RepeatedPtrFieldBase instance costs
  // significant performance for memory-sensitive workloads.
  Arena* arena_;
  int current_size_;
  int total_size_;
  struct Rep {
    int allocated_size;
    // Flexible-array-style tail: the Rep allocation is sized to hold
    // total_size_ pointers starting at elements[0].
    void* elements[1];
  };
  static const size_t kRepHeaderSize = sizeof(Rep) - sizeof(void*);
  // Contains arena ptr and the elements array. We also keep the invariant that
  // if rep_ is NULL, then arena is NULL.
  Rep* rep_;

  template <typename TypeHandler>
  static inline typename TypeHandler::Type* cast(void* element) {
    return reinterpret_cast<typename TypeHandler::Type*>(element);
  }
  template <typename TypeHandler>
  static inline const typename TypeHandler::Type* cast(const void* element) {
    return reinterpret_cast<const typename TypeHandler::Type*>(element);
  }

  // Non-templated inner function to avoid code duplication. Takes a function
  // pointer to the type-specific (templated) inner allocate/merge loop.
  void MergeFromInternal(
      const RepeatedPtrFieldBase& other,
      void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int));

  template<typename TypeHandler>
  void MergeFromInnerLoop(
      void** our_elems, void** other_elems, int length, int already_allocated);

  // Internal helper: extend array space if necessary to contain |extend_amount|
  // more elements, and return a pointer to the element immediately following
  // the old list of elements. This interface factors out common behavior from
  // Reserve() and MergeFrom() to reduce code size. |extend_amount| must be > 0.
  void** InternalExtend(int extend_amount);

  // The reflection implementation needs to call protected methods directly,
  // reinterpreting pointers as being to Message instead of a specific Message
  // subclass.
  friend class GeneratedMessageReflection;

  // ExtensionSet stores repeated message extensions as
  // RepeatedPtrField<MessageLite>, but non-lite ExtensionSets need to implement
  // SpaceUsedLong(), and thus need to call SpaceUsedExcludingSelfLong()
  // reinterpreting MessageLite as Message. ExtensionSet also needs to make use
  // of AddFromCleared(), which is not part of the public interface.
  friend class ExtensionSet;

  // The MapFieldBase implementation needs to call protected methods directly,
  // reinterpreting pointers as being to Message instead of a specific Message
  // subclass.
  friend class MapFieldBase;

  // The table-driven MergePartialFromCodedStream implementation needs to
  // operate on RepeatedPtrField<MessageLite>.
  friend class MergePartialFromCodedStreamHelper;

  // To parse directly into a proto2 generated class, the upb class GMR_Handlers
  // needs to be able to modify a RepeatedPtrFieldBase directly.
  friend class upb::google_opensource::GMR_Handlers;

  friend class AccessorHelper;

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(RepeatedPtrFieldBase);
};
|
634 |
+
|
635 |
+
// Default TypeHandler (see the contract documented on RepeatedPtrFieldBase)
// for message-like element types: allocation, deletion, clearing and merging
// are delegated to Arena helpers and to the element's own Clear()/MergeFrom().
template <typename GenericType>
class GenericTypeHandler {
 public:
  typedef GenericType Type;
  typedef GenericType WeakType;
  // Messages are not moved by value; RepeatedPtrField manages them by pointer.
  static const bool Moveable = false;

  static inline GenericType* New(Arena* arena) {
    return ::google::protobuf::Arena::CreateMaybeMessage<Type>(arena);
  }
  static inline GenericType* NewFromPrototype(
      const GenericType* prototype, ::google::protobuf::Arena* arena = NULL);
  static inline void Delete(GenericType* value, Arena* arena) {
    // Arena-owned objects are freed when the arena is destroyed, not here.
    if (arena == NULL) {
      delete value;
    }
  }
  static inline ::google::protobuf::Arena* GetArena(GenericType* value) {
    return ::google::protobuf::Arena::GetArena<Type>(value);
  }
  static inline void* GetMaybeArenaPointer(GenericType* value) {
    return ::google::protobuf::Arena::GetArena<Type>(value);
  }

  static inline void Clear(GenericType* value) { value->Clear(); }
  GOOGLE_PROTOBUF_ATTRIBUTE_NOINLINE
  static void Merge(const GenericType& from, GenericType* to);
  static inline size_t SpaceUsedLong(const GenericType& value) {
    return value.SpaceUsedLong();
  }
};
|
666 |
+
|
667 |
+
// The prototype is ignored for concrete generated types: their default
// constructor (via New()) already produces the right instance.
template <typename GenericType>
GenericType* GenericTypeHandler<GenericType>::NewFromPrototype(
    const GenericType* /* prototype */, ::google::protobuf::Arena* arena) {
  return New(arena);
}
template <typename GenericType>
void GenericTypeHandler<GenericType>::Merge(const GenericType& from,
                                            GenericType* to) {
  to->MergeFrom(from);
}
|
677 |
+
|
678 |
+
// NewFromPrototype() and Merge() are not defined inline here, as we will need
// to do a virtual function dispatch anyways to go from Message* to call
// New/Merge. (Their bodies live in a .cc file.)
template<>
MessageLite* GenericTypeHandler<MessageLite>::NewFromPrototype(
    const MessageLite* prototype, google::protobuf::Arena* arena);
template<>
inline google::protobuf::Arena* GenericTypeHandler<MessageLite>::GetArena(
    MessageLite* value) {
  return value->GetArena();
}
template<>
inline void* GenericTypeHandler<MessageLite>::GetMaybeArenaPointer(
    MessageLite* value) {
  return value->GetMaybeArenaPointer();
}
template <>
void GenericTypeHandler<MessageLite>::Merge(const MessageLite& from,
                                            MessageLite* to);
// Strings have clear()/assignment instead of Clear()/MergeFrom(), so the
// generic bodies must be overridden.
template<>
inline void GenericTypeHandler<string>::Clear(string* value) {
  value->clear();
}
template<>
void GenericTypeHandler<string>::Merge(const string& from,
                                       string* to);
|
704 |
+
|
705 |
+
// Declarations of the specialization as we cannot define them here, as the
// header that defines ProtocolMessage depends on types defined in this header.
#define DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(TypeName)                 \
    template<>                                                                 \
    TypeName* GenericTypeHandler<TypeName>::NewFromPrototype(                  \
        const TypeName* prototype, google::protobuf::Arena* arena);            \
    template<>                                                                 \
    google::protobuf::Arena* GenericTypeHandler<TypeName>::GetArena(           \
        TypeName* value);                                                      \
    template<>                                                                 \
    void* GenericTypeHandler<TypeName>::GetMaybeArenaPointer(                  \
        TypeName* value);

// Message specialization bodies defined in message.cc. This split is necessary
// to allow proto2-lite (which includes this header) to be independent of
// Message.
DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(Message)

// The macro is declaration-only scaffolding; keep it out of client code.
#undef DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES
|
725 |
+
|
726 |
+
// TypeHandler (see RepeatedPtrFieldBase's contract) for repeated string
// fields. Strings never report an owning arena (GetArena/GetMaybeArenaPointer
// return NULL) because std::string carries no arena pointer of its own.
class StringTypeHandler {
 public:
  typedef string Type;
  typedef string WeakType;
  // Strings can be moved into place when the implementation supports it.
  static const bool Moveable = std::is_move_constructible<Type>::value &&
                               std::is_move_assignable<Type>::value;

  static inline string* New(Arena* arena) {
    return Arena::Create<string>(arena);
  }
  static inline string* New(Arena* arena, string&& value) {
    return Arena::Create<string>(arena, std::move(value));
  }
  static inline string* NewFromPrototype(const string*,
                                         ::google::protobuf::Arena* arena) {
    // Prototype is irrelevant for strings; a fresh empty string suffices.
    return New(arena);
  }
  static inline ::google::protobuf::Arena* GetArena(string*) {
    return NULL;
  }
  static inline void* GetMaybeArenaPointer(string* /* value */) {
    return NULL;
  }
  static inline void Delete(string* value, Arena* arena) {
    // Arena-owned strings are reclaimed with the arena, not here.
    if (arena == NULL) {
      delete value;
    }
  }
  static inline void Clear(string* value) { value->clear(); }
  static inline void Merge(const string& from, string* to) { *to = from; }
  static size_t SpaceUsedLong(const string& value)  {
    return sizeof(value) + StringSpaceUsedExcludingSelfLong(value);
  }
};
|
760 |
+
|
761 |
+
} // namespace internal
|
762 |
+
|
763 |
+
// RepeatedPtrField is like RepeatedField, but used for repeated strings or
// Messages: elements are owned by pointer, and removed elements may be cached
// for reuse. All type-specific work is forwarded to the private
// RepeatedPtrFieldBase with the appropriate TypeHandler.
template <typename Element>
class RepeatedPtrField final : private internal::RepeatedPtrFieldBase {
 public:
  RepeatedPtrField();
  explicit RepeatedPtrField(::google::protobuf::Arena* arena);

  RepeatedPtrField(const RepeatedPtrField& other);
  template <typename Iter>
  RepeatedPtrField(Iter begin, const Iter& end);
  ~RepeatedPtrField();

  RepeatedPtrField& operator=(const RepeatedPtrField& other);

  RepeatedPtrField(RepeatedPtrField&& other) noexcept;
  RepeatedPtrField& operator=(RepeatedPtrField&& other) noexcept;

  bool empty() const;
  int size() const;

  const Element& Get(int index) const;
  Element* Mutable(int index);
  Element* Add();
  void Add(Element&& value);

  const Element& operator[](int index) const { return Get(index); }
  Element& operator[](int index) { return *Mutable(index); }

  // Remove the last element in the array.
  // Ownership of the element is retained by the array.
  void RemoveLast();

  // Delete elements with indices in the range [start .. start+num-1].
  // Caution: implementation moves all elements with indices [start+num .. ].
  // Calling this routine inside a loop can cause quadratic behavior.
  void DeleteSubrange(int start, int num);

  void Clear();
  void MergeFrom(const RepeatedPtrField& other);
  void CopyFrom(const RepeatedPtrField& other);

  // Reserve space to expand the field to at least the given size. This only
  // resizes the pointer array; it doesn't allocate any objects. If the
  // array is grown, it will always be at least doubled in size.
  void Reserve(int new_size);

  int Capacity() const;

  // Gets the underlying array. This pointer is possibly invalidated by
  // any add or remove operation.
  Element** mutable_data();
  const Element* const* data() const;

  // Swap entire contents with "other". If they are on separate arenas, then
  // copies data.
  void Swap(RepeatedPtrField* other);

  // Swap entire contents with "other". Caller should guarantee that either both
  // fields are on the same arena or both are on the heap. Swapping between
  // different arenas with this function is disallowed and is caught via
  // GOOGLE_DCHECK.
  void UnsafeArenaSwap(RepeatedPtrField* other);

  // Swap two elements.
  void SwapElements(int index1, int index2);

  // STL-like iterator support
  typedef internal::RepeatedPtrIterator<Element> iterator;
  typedef internal::RepeatedPtrIterator<const Element> const_iterator;
  typedef Element value_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  typedef int size_type;
  typedef ptrdiff_t difference_type;

  iterator begin();
  const_iterator begin() const;
  const_iterator cbegin() const;
  iterator end();
  const_iterator end() const;
  const_iterator cend() const;

  // Reverse iterator support
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
  typedef std::reverse_iterator<iterator> reverse_iterator;
  reverse_iterator rbegin() {
    return reverse_iterator(end());
  }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  reverse_iterator rend() {
    return reverse_iterator(begin());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  // Custom STL-like iterator that iterates over and returns the underlying
  // pointers to Element rather than Element itself.
  typedef internal::RepeatedPtrOverPtrsIterator<Element*, void*>
      pointer_iterator;
  typedef internal::RepeatedPtrOverPtrsIterator<const Element* const,
                                                const void* const>
      const_pointer_iterator;
  pointer_iterator pointer_begin();
  const_pointer_iterator pointer_begin() const;
  pointer_iterator pointer_end();
  const_pointer_iterator pointer_end() const;

  // Returns (an estimate of) the number of bytes used by the repeated field,
  // excluding sizeof(*this).
  size_t SpaceUsedExcludingSelfLong() const;

  int SpaceUsedExcludingSelf() const {
    return internal::ToIntSize(SpaceUsedExcludingSelfLong());
  }

  // Advanced memory management --------------------------------------
  // When hardcore memory management becomes necessary -- as it sometimes
  // does here at Google -- the following methods may be useful.

  // Add an already-allocated object, passing ownership to the
  // RepeatedPtrField.
  //
  // Note that some special behavior occurs with respect to arenas:
  //
  //   (i) if this field holds submessages, the new submessage will be copied if
  //   the original is in an arena and this RepeatedPtrField is either in a
  //   different arena, or on the heap.
  //   (ii) if this field holds strings, the passed-in string *must* be
  //   heap-allocated, not arena-allocated. There is no way to dynamically check
  //   this at runtime, so User Beware.
  void AddAllocated(Element* value);

  // Remove the last element and return it, passing ownership to the caller.
  // Requires:  size() > 0
  //
  // If this RepeatedPtrField is on an arena, an object copy is required to pass
  // ownership back to the user (for compatible semantics). Use
  // UnsafeArenaReleaseLast() if this behavior is undesired.
  Element* ReleaseLast();

  // Add an already-allocated object, skipping arena-ownership checks. The user
  // must guarantee that the given object is in the same arena as this
  // RepeatedPtrField.
  // It is also useful in legacy code that uses temporary ownership to avoid
  // copies. Example:
  //   RepeatedPtrField<T> temp_field;
  //   temp_field.AddAllocated(new T);
  //   ... // Do something with temp_field
  //   temp_field.ExtractSubrange(0, temp_field.size(), nullptr);
  // If you put temp_field on the arena this fails, because the ownership
  // transfers to the arena at the "AddAllocated" call and is not released
  // anymore causing a double delete. UnsafeArenaAddAllocated prevents this.
  void UnsafeArenaAddAllocated(Element* value);

  // Remove the last element and return it. Works only when operating on an
  // arena. The returned pointer is to the original object in the arena, hence
  // has the arena's lifetime.
  // Requires:  current_size_ > 0
  Element* UnsafeArenaReleaseLast();

  // Extract elements with indices in the range "[start .. start+num-1]".
  // The caller assumes ownership of the extracted elements and is responsible
  // for deleting them when they are no longer needed.
  // If "elements" is non-NULL, then pointers to the extracted elements
  // are stored in "elements[0 .. num-1]" for the convenience of the caller.
  // If "elements" is NULL, then the caller must use some other mechanism
  // to perform any further operations (like deletion) on these elements.
  // Caution: implementation also moves elements with indices [start+num ..].
  // Calling this routine inside a loop can cause quadratic behavior.
  //
  // Memory copying behavior is identical to ReleaseLast(), described above: if
  // this RepeatedPtrField is on an arena, an object copy is performed for each
  // returned element, so that all returned element pointers are to
  // heap-allocated copies. If this copy is not desired, the user should call
  // UnsafeArenaExtractSubrange().
  void ExtractSubrange(int start, int num, Element** elements);

  // Identical to ExtractSubrange() described above, except that when this
  // repeated field is on an arena, no object copies are performed. Instead, the
  // raw object pointers are returned. Thus, if on an arena, the returned
  // objects must not be freed, because they will not be heap-allocated objects.
  void UnsafeArenaExtractSubrange(int start, int num, Element** elements);

  // When elements are removed by calls to RemoveLast() or Clear(), they
  // are not actually freed. Instead, they are cleared and kept so that
  // they can be reused later. This can save lots of CPU time when
  // repeatedly reusing a protocol message for similar purposes.
  //
  // Hardcore programs may choose to manipulate these cleared objects
  // to better optimize memory management using the following routines.

  // Get the number of cleared objects that are currently being kept
  // around for reuse.
  int ClearedCount() const;
  // Add an element to the pool of cleared objects, passing ownership to
  // the RepeatedPtrField. The element must be cleared prior to calling
  // this method.
  //
  // This method cannot be called when the repeated field is on an arena or when
  // |value| is; both cases will trigger a GOOGLE_DCHECK-failure.
  void AddCleared(Element* value);
  // Remove a single element from the cleared pool and return it, passing
  // ownership to the caller. The element is guaranteed to be cleared.
  // Requires:  ClearedCount() > 0
  //
  // This method cannot be called when the repeated field is on an arena; doing
  // so will trigger a GOOGLE_DCHECK-failure.
  Element* ReleaseCleared();

  // Removes the element referenced by position.
  //
  // Returns an iterator to the element immediately following the removed
  // element.
  //
  // Invalidates all iterators at or after the removed element, including end().
  iterator erase(const_iterator position);

  // Removes the elements in the range [first, last).
  //
  // Returns an iterator to the element immediately following the removed range.
  //
  // Invalidates all iterators at or after the removed range, including end().
  iterator erase(const_iterator first, const_iterator last);

  // Gets the arena on which this RepeatedPtrField stores its elements.
  ::google::protobuf::Arena* GetArena() const {
    return GetArenaNoVirtual();
  }

  // For internal use only.
  //
  // This is public due to it being called by generated code.
  using RepeatedPtrFieldBase::InternalSwap;

 private:
  // Note:  RepeatedPtrField SHOULD NOT be subclassed by users.
  class TypeHandler;

  // Internal arena accessor expected by helpers in Arena.
  inline Arena* GetArenaNoVirtual() const;

  // Implementations for ExtractSubrange(). The copying behavior must be
  // included only if the type supports the necessary operations (e.g.,
  // MergeFrom()), so we must resolve this at compile time. ExtractSubrange()
  // uses SFINAE to choose one of the below implementations.
  void ExtractSubrangeInternal(int start, int num, Element** elements,
                               std::true_type);
  void ExtractSubrangeInternal(int start, int num, Element** elements,
                               std::false_type);

  friend class Arena;
  friend class MessageLite;

  typedef void InternalArenaConstructable_;

};
|
1026 |
+
|
1027 |
+
// implementation ====================================================
|
1028 |
+
|
1029 |
+
// Default constructor: an empty, heap-backed field. No storage is
// allocated until the first Add()/Reserve().
template <typename Element>
inline RepeatedField<Element>::RepeatedField()
  : current_size_(0),
    total_size_(0),
    ptr_(NULL) {
}

// Arena constructor: stores |arena| in the pointer union so that the
// first Reserve() allocates the element block on that arena.
template <typename Element>
inline RepeatedField<Element>::RepeatedField(Arena* arena)
  : current_size_(0),
    total_size_(0),
    ptr_(arena) {
}
|
1042 |
+
|
1043 |
+
// Copy constructor: deep-copies |other|'s elements into freshly allocated
// heap storage (the copy is never arena-backed).
template <typename Element>
inline RepeatedField<Element>::RepeatedField(const RepeatedField& other)
  : current_size_(0),
    total_size_(0),
    ptr_(NULL) {
  if (other.current_size_ != 0) {
    // One allocation, then a bulk element copy.
    Reserve(other.size());
    AddNAlreadyReserved(other.size());
    CopyArray(Mutable(0), &other.Get(0), other.size());
  }
}
|
1054 |
+
|
1055 |
+
// Range constructor. When the range length can be computed up front
// (CalculateReserve() != -1, i.e. random-access/forward iterators) we
// allocate once and append without per-element capacity checks; otherwise
// fall back to plain Add() with amortized growth.
template <typename Element>
template <typename Iter>
RepeatedField<Element>::RepeatedField(Iter begin, const Iter& end)
  : current_size_(0),
    total_size_(0),
    ptr_(NULL) {
  int reserve = internal::CalculateReserve(begin, end);
  if (reserve != -1) {
    Reserve(reserve);
    for (; begin != end; ++begin) {
      AddAlreadyReserved(*begin);
    }
  } else {
    for (; begin != end; ++begin) {
      Add(*begin);
    }
  }
}
|
1073 |
+
|
1074 |
+
// Destructor: frees the element block only when one was actually
// allocated (total_size_ > 0). Arena-backed storage is owned and freed by
// the arena, which InternalDeallocate() accounts for.
template <typename Element>
RepeatedField<Element>::~RepeatedField() {
  if (total_size_ > 0) {
    InternalDeallocate(rep(), total_size_);
  }
}
|
1080 |
+
|
1081 |
+
template <typename Element>
|
1082 |
+
inline RepeatedField<Element>&
|
1083 |
+
RepeatedField<Element>::operator=(const RepeatedField& other) {
|
1084 |
+
if (this != &other)
|
1085 |
+
CopyFrom(other);
|
1086 |
+
return *this;
|
1087 |
+
}
|
1088 |
+
|
1089 |
+
// Move constructor. Storage cannot migrate off an arena, so moving from
// an arena-backed field degrades to a copy; heap-to-heap moves are a
// cheap pointer swap.
template <typename Element>
inline RepeatedField<Element>::RepeatedField(RepeatedField&& other) noexcept
  : RepeatedField() {
  // We don't just call Swap(&other) here because it would perform 3 copies if
  // the two fields are on different arenas.
  if (other.GetArenaNoVirtual()) {
    CopyFrom(other);
  } else {
    InternalSwap(&other);
  }
}
|
1100 |
+
|
1101 |
+
// Move assignment: swap when both sides share an arena (or both are
// heap-backed), otherwise copy — memory cannot move between arenas.
template <typename Element>
inline RepeatedField<Element>& RepeatedField<Element>::operator=(
    RepeatedField&& other) noexcept {
  // We don't just call Swap(&other) here because it would perform 3 copies if
  // the two fields are on different arenas.
  if (this != &other) {
    if (this->GetArenaNoVirtual() != other.GetArenaNoVirtual()) {
      CopyFrom(other);
    } else {
      InternalSwap(&other);
    }
  }
  return *this;
}
|
1115 |
+
|
1116 |
+
template <typename Element>
|
1117 |
+
inline bool RepeatedField<Element>::empty() const {
|
1118 |
+
return current_size_ == 0;
|
1119 |
+
}
|
1120 |
+
|
1121 |
+
template <typename Element>
|
1122 |
+
inline int RepeatedField<Element>::size() const {
|
1123 |
+
return current_size_;
|
1124 |
+
}
|
1125 |
+
|
1126 |
+
template <typename Element>
|
1127 |
+
inline int RepeatedField<Element>::Capacity() const {
|
1128 |
+
return total_size_;
|
1129 |
+
}
|
1130 |
+
|
1131 |
+
// Appends |value| without a capacity check; the caller must have
// Reserve()d enough space (DCHECK-enforced in debug builds).
template<typename Element>
inline void RepeatedField<Element>::AddAlreadyReserved(const Element& value) {
  GOOGLE_DCHECK_LT(current_size_, total_size_);
  rep()->elements[current_size_++] = value;
}

// Appends one element slot (already constructed by Reserve()) and returns
// a pointer to it; caller must have Reserve()d space.
template<typename Element>
inline Element* RepeatedField<Element>::AddAlreadyReserved() {
  GOOGLE_DCHECK_LT(current_size_, total_size_);
  return &rep()->elements[current_size_++];
}

// Extends the visible size by |elements| pre-reserved slots and returns a
// pointer to the first new slot.
template<typename Element>
inline Element* RepeatedField<Element>::AddNAlreadyReserved(int elements) {
  GOOGLE_DCHECK_LE(current_size_ + elements, total_size_);
  // Warning: the underlying Rep (ptr_.rep) can be NULL when
  // elements == 0 && current_size_ == 0, making |ret| a bogus-but-unused
  // pointer. Existing callers depend on this behavior. :(
  Element* ret = &ptr_.rep->elements[current_size_];
  current_size_ += elements;
  return ret;
}
|
1152 |
+
|
1153 |
+
// Sets the logical size. Growing fills the new slots with |value|;
// shrinking just lowers current_size_ and keeps both the elements and the
// capacity for reuse.
template<typename Element>
inline void RepeatedField<Element>::Resize(int new_size, const Element& value) {
  GOOGLE_DCHECK_GE(new_size, 0);
  if (new_size > current_size_) {
    Reserve(new_size);
    std::fill(&rep()->elements[current_size_],
              &rep()->elements[new_size], value);
  }
  current_size_ = new_size;
}
|
1163 |
+
|
1164 |
+
// Debug-bounds-checked read access to element |index|.
template <typename Element>
inline const Element& RepeatedField<Element>::Get(int index) const {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  return rep()->elements[index];
}

// Debug-bounds-checked mutable access to element |index|.
template <typename Element>
inline Element* RepeatedField<Element>::Mutable(int index) {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  return &rep()->elements[index];
}

// Overwrites element |index| with |value| (debug-bounds-checked).
template <typename Element>
inline void RepeatedField<Element>::Set(int index, const Element& value) {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  rep()->elements[index] = value;
}
|
1184 |
+
|
1185 |
+
// Appends |value|, growing the backing array (amortized doubling inside
// Reserve()) when it is full.
template <typename Element>
inline void RepeatedField<Element>::Add(const Element& value) {
  if (current_size_ == total_size_) Reserve(total_size_ + 1);
  rep()->elements[current_size_++] = value;
}

// Appends a default-initialized element (constructed by Reserve()'s
// placement-new pass) and returns a pointer to it.
template <typename Element>
inline Element* RepeatedField<Element>::Add() {
  if (current_size_ == total_size_) Reserve(total_size_ + 1);
  return &rep()->elements[current_size_++];
}
|
1196 |
+
|
1197 |
+
template <typename Element>
|
1198 |
+
inline void RepeatedField<Element>::RemoveLast() {
|
1199 |
+
GOOGLE_DCHECK_GT(current_size_, 0);
|
1200 |
+
current_size_--;
|
1201 |
+
}
|
1202 |
+
|
1203 |
+
// Copies elements [start, start+num) into |elements| (when non-NULL),
// then closes the gap by sliding the tail left and truncating.
template <typename Element>
void RepeatedField<Element>::ExtractSubrange(
    int start, int num, Element* elements) {
  GOOGLE_DCHECK_GE(start, 0);
  GOOGLE_DCHECK_GE(num, 0);
  GOOGLE_DCHECK_LE(start + num, this->current_size_);

  // Save the values of the removed elements if requested.
  if (elements != NULL) {
    for (int i = 0; i < num; ++i)
      elements[i] = this->Get(i + start);
  }

  // Slide remaining elements down to fill the gap.
  if (num > 0) {
    for (int i = start + num; i < this->current_size_; ++i)
      this->Set(i - num, this->Get(i));
    this->Truncate(this->current_size_ - num);
  }
}
|
1223 |
+
|
1224 |
+
// Logically empties the field. Capacity (and the Rep block) is kept so
// subsequent Add() calls need not reallocate.
template <typename Element>
inline void RepeatedField<Element>::Clear() {
  current_size_ = 0;
}
|
1228 |
+
|
1229 |
+
// Appends a copy of every element of |other|. Self-merge is disallowed
// (DCHECK) because Reserve() could invalidate the source array.
template <typename Element>
inline void RepeatedField<Element>::MergeFrom(const RepeatedField& other) {
  GOOGLE_DCHECK_NE(&other, this);
  if (other.current_size_ != 0) {
    int existing_size = size();
    Reserve(existing_size + other.size());
    AddNAlreadyReserved(other.size());
    CopyArray(Mutable(existing_size), &other.Get(0), other.size());
  }
}
|
1239 |
+
|
1240 |
+
template <typename Element>
|
1241 |
+
inline void RepeatedField<Element>::CopyFrom(const RepeatedField& other) {
|
1242 |
+
if (&other == this) return;
|
1243 |
+
Clear();
|
1244 |
+
MergeFrom(other);
|
1245 |
+
}
|
1246 |
+
|
1247 |
+
// Removes the single element at |position|; returns an iterator to the
// element immediately after it.
template <typename Element>
inline typename RepeatedField<Element>::iterator RepeatedField<Element>::erase(
    const_iterator position) {
  return erase(position, position + 1);
}

// Removes [first, last) by copying the tail left over the gap, then
// truncating; returns an iterator to the first element after the range.
template <typename Element>
inline typename RepeatedField<Element>::iterator RepeatedField<Element>::erase(
    const_iterator first, const_iterator last) {
  size_type first_offset = first - cbegin();
  if (first != last) {
    Truncate(std::copy(last, cend(), begin() + first_offset) - cbegin());
  }
  return begin() + first_offset;
}
|
1262 |
+
|
1263 |
+
// Raw pointer to the element array, or NULL when nothing has been
// allocated yet.
template <typename Element>
inline Element* RepeatedField<Element>::mutable_data() {
  return total_size_ > 0 ? rep()->elements : NULL;
}

// Const raw pointer to the element array, or NULL when unallocated.
template <typename Element>
inline const Element* RepeatedField<Element>::data() const {
  return total_size_ > 0 ? rep()->elements : NULL;
}

// Like data() but skips the allocation check; only valid to call when a
// Rep block exists (caller's responsibility).
template <typename Element>
inline const Element* RepeatedField<Element>::unsafe_data() const {
  return rep()->elements;
}
|
1277 |
+
|
1278 |
+
// O(1) member-wise swap. Only legal between fields on the same arena (or
// both heap-backed), which the second DCHECK enforces.
template <typename Element>
inline void RepeatedField<Element>::InternalSwap(RepeatedField* other) {
  GOOGLE_DCHECK(this != other);
  GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());

  std::swap(ptr_, other->ptr_);
  std::swap(current_size_, other->current_size_);
  std::swap(total_size_, other->total_size_);
}
|
1287 |
+
|
1288 |
+
// Swaps contents with |other|. Same-arena swaps are O(1) pointer swaps;
// cross-arena swaps must copy in both directions (storage cannot migrate
// between arenas), staging one side in a temporary on |other|'s arena.
template <typename Element>
void RepeatedField<Element>::Swap(RepeatedField* other) {
  if (this == other) return;
  if (GetArenaNoVirtual() == other->GetArenaNoVirtual()) {
    InternalSwap(other);
  } else {
    RepeatedField<Element> temp(other->GetArenaNoVirtual());
    temp.MergeFrom(*this);
    CopyFrom(*other);
    other->UnsafeArenaSwap(&temp);
  }
}
|
1300 |
+
|
1301 |
+
template <typename Element>
|
1302 |
+
void RepeatedField<Element>::UnsafeArenaSwap(RepeatedField* other) {
|
1303 |
+
if (this == other) return;
|
1304 |
+
InternalSwap(other);
|
1305 |
+
}
|
1306 |
+
|
1307 |
+
// Exchanges the values at |index1| and |index2| (no bounds check).
template <typename Element>
void RepeatedField<Element>::SwapElements(int index1, int index2) {
  using std::swap;  // enable ADL with fallback
  swap(rep()->elements[index1], rep()->elements[index2]);
}
|
1312 |
+
|
1313 |
+
// STL-style iterators. Each returns NULL when no storage has been
// allocated, so begin() == end() and the range is validly empty.
template <typename Element>
inline typename RepeatedField<Element>::iterator
RepeatedField<Element>::begin() {
  return total_size_ > 0 ? rep()->elements : NULL;
}
template <typename Element>
inline typename RepeatedField<Element>::const_iterator
RepeatedField<Element>::begin() const {
  return total_size_ > 0 ? rep()->elements : NULL;
}
template <typename Element>
inline typename RepeatedField<Element>::const_iterator
RepeatedField<Element>::cbegin() const {
  return total_size_ > 0 ? rep()->elements : NULL;
}
template <typename Element>
inline typename RepeatedField<Element>::iterator
RepeatedField<Element>::end() {
  return total_size_ > 0 ? rep()->elements + current_size_ : NULL;
}
template <typename Element>
inline typename RepeatedField<Element>::const_iterator
RepeatedField<Element>::end() const {
  return total_size_ > 0 ? rep()->elements + current_size_ : NULL;
}
template <typename Element>
inline typename RepeatedField<Element>::const_iterator
RepeatedField<Element>::cend() const {
  return total_size_ > 0 ? rep()->elements + current_size_ : NULL;
}
|
1343 |
+
|
1344 |
+
// Bytes owned by this field's backing block (full capacity plus the Rep
// header), excluding sizeof(*this); 0 when nothing is allocated.
template <typename Element>
inline size_t RepeatedField<Element>::SpaceUsedExcludingSelfLong() const {
  return total_size_ > 0 ? (total_size_ * sizeof(Element) + kRepHeaderSize) : 0;
}
|
1348 |
+
|
1349 |
+
// Avoid inlining of Reserve(): new, copy, and delete[] lead to a significant
// amount of code bloat.
//
// Grows capacity to at least |new_size| (doubling, with a configured
// minimum), allocating on the arena when one is set, then migrates the
// existing elements and releases the old block.
template <typename Element>
void RepeatedField<Element>::Reserve(int new_size) {
  if (total_size_ >= new_size) return;
  Rep* old_rep = total_size_ > 0 ? rep() : NULL;
  Arena* arena = GetArenaNoVirtual();
  new_size = std::max(google::protobuf::internal::kMinRepeatedFieldAllocationSize,
                      std::max(total_size_ * 2, new_size));
  GOOGLE_DCHECK_LE(
      static_cast<size_t>(new_size),
      (std::numeric_limits<size_t>::max() - kRepHeaderSize) / sizeof(Element))
      << "Requested size is too large to fit into size_t.";
  size_t bytes = kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
  if (arena == NULL) {
    ptr_.rep = static_cast<Rep*>(::operator new(bytes));
  } else {
    ptr_.rep = reinterpret_cast<Rep*>(
        ::google::protobuf::Arena::CreateArray<char>(arena, bytes));
  }
  ptr_.rep->arena = arena;
  int old_total_size = total_size_;
  total_size_ = new_size;
  // Invoke placement-new on newly allocated elements. We shouldn't have to do
  // this, since Element is supposed to be POD, but a previous version of this
  // code allocated storage with "new Element[size]" and some code uses
  // RepeatedField with non-POD types, relying on constructor invocation. If
  // Element has a trivial constructor (e.g., int32), gcc (tested with -O2)
  // completely removes this loop because the loop body is empty, so this has no
  // effect unless its side-effects are required for correctness.
  // Note that we do this before MoveArray() below because Element's copy
  // assignment implementation will want an initialized instance first.
  Element* e = &rep()->elements[0];
  Element* limit = e + total_size_;
  for (; e < limit; e++) {
    new (e) Element;
  }
  if (current_size_ > 0) {
    MoveArray(&rep()->elements[0], old_rep->elements, current_size_);
  }

  // Likewise, we need to invoke destructors on the old array.
  InternalDeallocate(old_rep, old_total_size);

}
|
1394 |
+
|
1395 |
+
// Lowers the logical size to |new_size| without destroying elements or
// releasing capacity. The size_ > 0 guard makes this a no-op on an empty
// field (where the DCHECK forces new_size == 0 anyway).
template <typename Element>
inline void RepeatedField<Element>::Truncate(int new_size) {
  GOOGLE_DCHECK_LE(new_size, current_size_);
  if (current_size_ > 0) {
    current_size_ = new_size;
  }
}
|
1402 |
+
|
1403 |
+
// "Moves" |array_size| elements from |from| to |to|. Elements are treated
// as trivially relocatable, so a move is implemented as a copy.
template <typename Element>
inline void RepeatedField<Element>::MoveArray(
    Element* to, Element* from, int array_size) {
  CopyArray(to, from, array_size);
}

// Copies |array_size| elements; ElementCopier dispatches to memcpy for
// trivially copyable Element types, std::copy otherwise.
template <typename Element>
inline void RepeatedField<Element>::CopyArray(
    Element* to, const Element* from, int array_size) {
  internal::ElementCopier<Element>()(to, from, array_size);
}
|
1414 |
+
|
1415 |
+
namespace internal {

// Generic element copier: element-wise copy assignment via std::copy.
template <typename Element, bool HasTrivialCopy>
void ElementCopier<Element, HasTrivialCopy>::operator()(
    Element* to, const Element* from, int array_size) {
  std::copy(from, from + array_size, to);
}

// Specialization for trivially copyable element types: one memcpy.
template <typename Element>
struct ElementCopier<Element, true> {
  void operator()(Element* to, const Element* from, int array_size) {
    memcpy(to, from, static_cast<size_t>(array_size) * sizeof(Element));
  }
};

}  // namespace internal
|
1431 |
+
|
1432 |
+
|
1433 |
+
// -------------------------------------------------------------------
|
1434 |
+
|
1435 |
+
namespace internal {
|
1436 |
+
|
1437 |
+
// Heap-backed (no arena) base; the pointer array is lazily allocated.
inline RepeatedPtrFieldBase::RepeatedPtrFieldBase()
  : arena_(NULL),
    current_size_(0),
    total_size_(0),
    rep_(NULL) {
}

// Arena-backed base; Rep and element allocations go on |arena|.
inline RepeatedPtrFieldBase::RepeatedPtrFieldBase(::google::protobuf::Arena* arena)
  : arena_(arena),
    current_size_(0),
    total_size_(0),
    rep_(NULL) {
}
|
1450 |
+
|
1451 |
+
// Deletes all allocated elements (including cleared ones past
// current_size_) and the Rep block — but only when heap-backed; arena
// memory is reclaimed by the arena itself. Uses sized delete when the
// compiler supports it.
template <typename TypeHandler>
void RepeatedPtrFieldBase::Destroy() {
  if (rep_ != NULL && arena_ == NULL) {
    int n = rep_->allocated_size;
    void* const* elements = rep_->elements;
    for (int i = 0; i < n; i++) {
      TypeHandler::Delete(cast<TypeHandler>(elements[i]), NULL);
    }
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
    const size_t size = total_size_ * sizeof(elements[0]) + kRepHeaderSize;
    ::operator delete(static_cast<void*>(rep_), size);
#else
    ::operator delete(static_cast<void*>(rep_));
#endif
  }
  rep_ = NULL;
}
|
1468 |
+
|
1469 |
+
// Swaps with |other|: an O(1) pointer swap when both live on the same
// arena (or both on the heap), otherwise the copying fallback.
template <typename TypeHandler>
inline void RepeatedPtrFieldBase::Swap(RepeatedPtrFieldBase* other) {
  if (other->GetArenaNoVirtual() == GetArenaNoVirtual()) {
    InternalSwap(other);
  } else {
    SwapFallback<TypeHandler>(other);
  }
}
|
1477 |
+
|
1478 |
+
// Cross-arena swap: messages cannot migrate between arenas, so both sides
// are copied.
template <typename TypeHandler>
void RepeatedPtrFieldBase::SwapFallback(RepeatedPtrFieldBase* other) {
  GOOGLE_DCHECK(other->GetArenaNoVirtual() != GetArenaNoVirtual());

  // Copy semantics in this case. We try to improve efficiency by placing the
  // temporary on |other|'s arena so that messages are copied cross-arena only
  // once, not twice.
  RepeatedPtrFieldBase temp(other->GetArenaNoVirtual());
  temp.MergeFrom<TypeHandler>(*this);
  this->Clear<TypeHandler>();
  this->MergeFrom<TypeHandler>(*other);
  other->Clear<TypeHandler>();
  other->InternalSwap(&temp);
  temp.Destroy<TypeHandler>();  // Frees rep_ if `other` had no arena.
}
|
1493 |
+
|
1494 |
+
inline bool RepeatedPtrFieldBase::empty() const {
|
1495 |
+
return current_size_ == 0;
|
1496 |
+
}
|
1497 |
+
|
1498 |
+
inline int RepeatedPtrFieldBase::size() const {
|
1499 |
+
return current_size_;
|
1500 |
+
}
|
1501 |
+
|
1502 |
+
// Debug-bounds-checked const access to element |index|.
template <typename TypeHandler>
inline const typename TypeHandler::WeakType&
RepeatedPtrFieldBase::Get(int index) const {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  return *cast<TypeHandler>(rep_->elements[index]);
}

// Debug-bounds-checked mutable access to element |index|.
template <typename TypeHandler>
inline typename TypeHandler::Type*
RepeatedPtrFieldBase::Mutable(int index) {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  return cast<TypeHandler>(rep_->elements[index]);
}

// Deletes the object at |index| via the handler. NOTE(review): this does
// not remove the pointer from the array or adjust sizes — callers are
// responsible for that.
template <typename TypeHandler>
inline void RepeatedPtrFieldBase::Delete(int index) {
  GOOGLE_DCHECK_GE(index, 0);
  GOOGLE_DCHECK_LT(index, current_size_);
  TypeHandler::Delete(cast<TypeHandler>(rep_->elements[index]), arena_);
}
|
1524 |
+
|
1525 |
+
// Returns the next element slot: reuses a previously cleared element when
// one is available; otherwise grows the pointer array if needed and
// allocates a fresh element (modeled on |prototype| when given).
template <typename TypeHandler>
inline typename TypeHandler::Type* RepeatedPtrFieldBase::Add(
    typename TypeHandler::Type* prototype) {
  if (rep_ != NULL && current_size_ < rep_->allocated_size) {
    // Reuse a cleared object sitting between current_size_ and
    // allocated_size.
    return cast<TypeHandler>(rep_->elements[current_size_++]);
  }
  if (!rep_ || rep_->allocated_size == total_size_) {
    Reserve(total_size_ + 1);
  }
  ++rep_->allocated_size;
  typename TypeHandler::Type* result =
      TypeHandler::NewFromPrototype(prototype, arena_);
  rep_->elements[current_size_++] = result;
  return result;
}
|
1540 |
+
|
1541 |
+
// Move-append overload (enabled only for moveable handler types). A
// cleared element is reused via move-assignment when available; otherwise
// a new element is move-constructed (on the arena if set).
template <typename TypeHandler>
inline void RepeatedPtrFieldBase::Add(
    typename TypeHandler::Type&& value,
    std::enable_if<TypeHandler::Moveable>*) {
  if (rep_ != NULL && current_size_ < rep_->allocated_size) {
    *cast<TypeHandler>(rep_->elements[current_size_++]) = std::move(value);
    return;
  }
  if (!rep_ || rep_->allocated_size == total_size_) {
    Reserve(total_size_ + 1);
  }
  ++rep_->allocated_size;
  typename TypeHandler::Type* result =
      TypeHandler::New(arena_, std::move(value));
  rep_->elements[current_size_++] = result;
}
|
1557 |
+
|
1558 |
+
// Clears (but does not free) the last element and hides it past
// current_size_ so a later Add() can reuse it.
template <typename TypeHandler>
inline void RepeatedPtrFieldBase::RemoveLast() {
  GOOGLE_DCHECK_GT(current_size_, 0);
  TypeHandler::Clear(cast<TypeHandler>(rep_->elements[--current_size_]));
}
|
1563 |
+
|
1564 |
+
// Clears every visible element (objects are retained for reuse, not
// freed) and resets the visible size; allocated_size is untouched.
template <typename TypeHandler>
void RepeatedPtrFieldBase::Clear() {
  const int n = current_size_;
  GOOGLE_DCHECK_GE(n, 0);
  if (n > 0) {
    void* const* elements = rep_->elements;
    int i = 0;
    do {
      TypeHandler::Clear(cast<TypeHandler>(elements[i++]));
    } while (i < n);
    current_size_ = 0;
  }
}
|
1577 |
+
|
1578 |
+
// To avoid unnecessary code duplication and reduce binary size, we use a
// layered approach to implementing MergeFrom(). The toplevel method is
// templated, so we get a small thunk per concrete message type in the binary.
// This calls a shared implementation with most of the logic, passing a function
// pointer to another type-specific piece of code that calls the object-allocate
// and merge handlers.
template <typename TypeHandler>
inline void RepeatedPtrFieldBase::MergeFrom(const RepeatedPtrFieldBase& other) {
  GOOGLE_DCHECK_NE(&other, this);
  if (other.current_size_ == 0) return;
  MergeFromInternal(
      other, &RepeatedPtrFieldBase::MergeFromInnerLoop<TypeHandler>);
}

// Type-agnostic half of MergeFrom(): extends our pointer array, then
// delegates per-element allocation/merging to the type-specific
// |inner_loop| and fixes up the size bookkeeping.
inline void RepeatedPtrFieldBase::MergeFromInternal(
    const RepeatedPtrFieldBase& other,
    void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int)) {
  // Note: wrapper has already guaranteed that other.rep_ != NULL here.
  int other_size = other.current_size_;
  void** other_elements = other.rep_->elements;
  void** new_elements = InternalExtend(other_size);
  int allocated_elems = rep_->allocated_size - current_size_;
  (this->*inner_loop)(new_elements, other_elements,
                      other_size, allocated_elems);
  current_size_ += other_size;
  if (rep_->allocated_size < current_size_) {
    rep_->allocated_size = current_size_;
  }
}
|
1607 |
+
|
1608 |
+
// Merges other_elems to our_elems: reuses the first |already_allocated|
// existing (cleared) objects, then allocates new ones for the remainder.
template<typename TypeHandler>
void RepeatedPtrFieldBase::MergeFromInnerLoop(
    void** our_elems, void** other_elems, int length, int already_allocated) {
  // Split into two loops, over ranges [0, allocated) and [allocated, length),
  // to avoid a branch within the loop.
  for (int i = 0; i < already_allocated && i < length; i++) {
    // Already allocated: use existing element.
    typename TypeHandler::WeakType* other_elem =
        reinterpret_cast<typename TypeHandler::WeakType*>(other_elems[i]);
    typename TypeHandler::WeakType* new_elem =
        reinterpret_cast<typename TypeHandler::WeakType*>(our_elems[i]);
    TypeHandler::Merge(*other_elem, new_elem);
  }
  Arena* arena = GetArenaNoVirtual();
  for (int i = already_allocated; i < length; i++) {
    // Not allocated: alloc a new element first, then merge it.
    typename TypeHandler::WeakType* other_elem =
        reinterpret_cast<typename TypeHandler::WeakType*>(other_elems[i]);
    typename TypeHandler::WeakType* new_elem =
        TypeHandler::NewFromPrototype(other_elem, arena);
    TypeHandler::Merge(*other_elem, new_elem);
    our_elems[i] = new_elem;
  }
}
|
1633 |
+
|
1634 |
+
template <typename TypeHandler>
|
1635 |
+
inline void RepeatedPtrFieldBase::CopyFrom(const RepeatedPtrFieldBase& other) {
|
1636 |
+
if (&other == this) return;
|
1637 |
+
RepeatedPtrFieldBase::Clear<TypeHandler>();
|
1638 |
+
RepeatedPtrFieldBase::MergeFrom<TypeHandler>(other);
|
1639 |
+
}
|
1640 |
+
|
1641 |
+
// Capacity of the pointer array.
inline int RepeatedPtrFieldBase::Capacity() const {
  return total_size_;
}

// Read-only view of the raw pointer array, or NULL when unallocated.
inline void* const* RepeatedPtrFieldBase::raw_data() const {
  return rep_ ? rep_->elements : NULL;
}

// Mutable view of the raw pointer array, or NULL when unallocated.
inline void** RepeatedPtrFieldBase::raw_mutable_data() const {
  return rep_ ? const_cast<void**>(rep_->elements) : NULL;
}
|
1652 |
+
|
1653 |
+
// Typed view of the pointer array.
template <typename TypeHandler>
inline typename TypeHandler::Type** RepeatedPtrFieldBase::mutable_data() {
  // TODO(kenton): Breaks C++ aliasing rules. We should probably remove this
  // method entirely.
  return reinterpret_cast<typename TypeHandler::Type**>(raw_mutable_data());
}

// Typed const view of the pointer array.
template <typename TypeHandler>
inline const typename TypeHandler::Type* const*
RepeatedPtrFieldBase::data() const {
  // TODO(kenton): Breaks C++ aliasing rules. We should probably remove this
  // method entirely.
  return reinterpret_cast<const typename TypeHandler::Type* const*>(raw_data());
}
|
1667 |
+
|
1668 |
+
// Exchanges the pointers at |index1| and |index2| (no bounds check).
inline void RepeatedPtrFieldBase::SwapElements(int index1, int index2) {
  using std::swap;  // enable ADL with fallback
  swap(rep_->elements[index1], rep_->elements[index2]);
}
|
1672 |
+
|
1673 |
+
// Bytes owned by this field: the pointer array plus Rep header, plus each
// allocated element's own usage as reported by the handler.
template <typename TypeHandler>
inline size_t RepeatedPtrFieldBase::SpaceUsedExcludingSelfLong() const {
  size_t allocated_bytes = static_cast<size_t>(total_size_) * sizeof(void*);
  if (rep_ != NULL) {
    for (int i = 0; i < rep_->allocated_size; ++i) {
      allocated_bytes += TypeHandler::SpaceUsedLong(
          *cast<TypeHandler>(rep_->elements[i]));
    }
    allocated_bytes += kRepHeaderSize;
  }
  return allocated_bytes;
}
|
1685 |
+
|
1686 |
+
template <typename TypeHandler>
|
1687 |
+
inline typename TypeHandler::Type* RepeatedPtrFieldBase::AddFromCleared() {
|
1688 |
+
if (rep_ != NULL && current_size_ < rep_->allocated_size) {
|
1689 |
+
return cast<TypeHandler>(rep_->elements[current_size_++]);
|
1690 |
+
} else {
|
1691 |
+
return NULL;
|
1692 |
+
}
|
1693 |
+
}
|
1694 |
+
|
1695 |
+
// AddAllocated version that implements arena-safe copying behavior.
// Takes ownership of |value|; when arenas differ, the slow path copies or
// Own()s it as appropriate.
template <typename TypeHandler>
void RepeatedPtrFieldBase::AddAllocatedInternal(
    typename TypeHandler::Type* value,
    std::true_type) {
  Arena* element_arena = reinterpret_cast<Arena*>(
      TypeHandler::GetMaybeArenaPointer(value));
  Arena* arena = GetArenaNoVirtual();
  if (arena == element_arena && rep_ &&
      rep_->allocated_size < total_size_) {
    // Fast path: underlying arena representation (tagged pointer) is equal to
    // our arena pointer, and we can add to array without resizing it (at least
    // one slot that is not allocated).
    void** elems = rep_->elements;
    if (current_size_ < rep_->allocated_size) {
      // Make space at [current] by moving first allocated element to end of
      // allocated list.
      elems[rep_->allocated_size] = elems[current_size_];
    }
    elems[current_size_] = value;
    current_size_ = current_size_ + 1;
    rep_->allocated_size = rep_->allocated_size + 1;
  } else {
    AddAllocatedSlowWithCopy<TypeHandler>(
        value, TypeHandler::GetArena(value), arena);
  }
}
|
1722 |
+
|
1723 |
+
// Slowpath handles all cases, copying if necessary.
template<typename TypeHandler>
void RepeatedPtrFieldBase::AddAllocatedSlowWithCopy(
    // Pass value_arena and my_arena to avoid duplicate virtual call (value) or
    // load (mine).
    typename TypeHandler::Type* value, Arena* value_arena, Arena* my_arena) {
  // Ensure that either the value is in the same arena, or if not, we do the
  // appropriate thing: Own() it (if it's on heap and we're in an arena) or copy
  // it to our arena/heap (otherwise).
  if (my_arena != NULL && value_arena == NULL) {
    my_arena->Own(value);
  } else if (my_arena != value_arena) {
    // Cross-arena (or arena-to-heap): copy into our space and free the
    // original on its own arena/heap.
    typename TypeHandler::Type* new_value =
        TypeHandler::NewFromPrototype(value, my_arena);
    TypeHandler::Merge(*value, new_value);
    TypeHandler::Delete(value, value_arena);
    value = new_value;
  }

  UnsafeArenaAddAllocated<TypeHandler>(value);
}
|
1744 |
+
|
1745 |
+
// AddAllocated version that does not implement arena-safe copying
// behavior (for handler types without arena support).
template <typename TypeHandler>
void RepeatedPtrFieldBase::AddAllocatedInternal(
    typename TypeHandler::Type* value,
    std::false_type) {
  if (rep_ && rep_->allocated_size < total_size_) {
    // Fast path: add to the existing array without resizing (there is at
    // least one slot that is not allocated).
    void** elems = rep_->elements;
    if (current_size_ < rep_->allocated_size) {
      // Make space at [current] by moving first allocated element to end of
      // allocated list.
      elems[rep_->allocated_size] = elems[current_size_];
    }
    elems[current_size_] = value;
    current_size_ = current_size_ + 1;
    ++rep_->allocated_size;
  } else {
    UnsafeArenaAddAllocated<TypeHandler>(value);
  }
}
|
1767 |
+
|
1768 |
+
template <typename TypeHandler>
|
1769 |
+
void RepeatedPtrFieldBase::UnsafeArenaAddAllocated(
|
1770 |
+
typename TypeHandler::Type* value) {
|
1771 |
+
// Make room for the new pointer.
|
1772 |
+
if (!rep_ || current_size_ == total_size_) {
|
1773 |
+
// The array is completely full with no cleared objects, so grow it.
|
1774 |
+
Reserve(total_size_ + 1);
|
1775 |
+
++rep_->allocated_size;
|
1776 |
+
} else if (rep_->allocated_size == total_size_) {
|
1777 |
+
// There is no more space in the pointer array because it contains some
|
1778 |
+
// cleared objects awaiting reuse. We don't want to grow the array in this
|
1779 |
+
// case because otherwise a loop calling AddAllocated() followed by Clear()
|
1780 |
+
// would leak memory.
|
1781 |
+
TypeHandler::Delete(
|
1782 |
+
cast<TypeHandler>(rep_->elements[current_size_]), arena_);
|
1783 |
+
} else if (current_size_ < rep_->allocated_size) {
|
1784 |
+
// We have some cleared objects. We don't care about their order, so we
|
1785 |
+
// can just move the first one to the end to make space.
|
1786 |
+
rep_->elements[rep_->allocated_size] = rep_->elements[current_size_];
|
1787 |
+
++rep_->allocated_size;
|
1788 |
+
} else {
|
1789 |
+
// There are no cleared objects.
|
1790 |
+
++rep_->allocated_size;
|
1791 |
+
}
|
1792 |
+
|
1793 |
+
rep_->elements[current_size_++] = value;
|
1794 |
+
}
|
1795 |
+
|
1796 |
+
// ReleaseLast() for types that implement merge/copy behavior.
|
1797 |
+
template <typename TypeHandler>
|
1798 |
+
inline typename TypeHandler::Type*
|
1799 |
+
RepeatedPtrFieldBase::ReleaseLastInternal(std::true_type) {
|
1800 |
+
// First, release an element.
|
1801 |
+
typename TypeHandler::Type* result = UnsafeArenaReleaseLast<TypeHandler>();
|
1802 |
+
// Now perform a copy if we're on an arena.
|
1803 |
+
Arena* arena = GetArenaNoVirtual();
|
1804 |
+
if (arena == NULL) {
|
1805 |
+
return result;
|
1806 |
+
} else {
|
1807 |
+
typename TypeHandler::Type* new_result =
|
1808 |
+
TypeHandler::NewFromPrototype(result, NULL);
|
1809 |
+
TypeHandler::Merge(*result, new_result);
|
1810 |
+
return new_result;
|
1811 |
+
}
|
1812 |
+
}
|
1813 |
+
|
1814 |
+
// ReleaseLast() for types that *do not* implement merge/copy behavior -- this
|
1815 |
+
// is the same as UnsafeArenaReleaseLast(). Note that we GOOGLE_DCHECK-fail if we're on
|
1816 |
+
// an arena, since the user really should implement the copy operation in this
|
1817 |
+
// case.
|
1818 |
+
template <typename TypeHandler>
|
1819 |
+
inline typename TypeHandler::Type*
|
1820 |
+
RepeatedPtrFieldBase::ReleaseLastInternal(std::false_type) {
|
1821 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)
|
1822 |
+
<< "ReleaseLast() called on a RepeatedPtrField that is on an arena, "
|
1823 |
+
<< "with a type that does not implement MergeFrom. This is unsafe; "
|
1824 |
+
<< "please implement MergeFrom for your type.";
|
1825 |
+
return UnsafeArenaReleaseLast<TypeHandler>();
|
1826 |
+
}
|
1827 |
+
|
1828 |
+
template <typename TypeHandler>
|
1829 |
+
inline typename TypeHandler::Type*
|
1830 |
+
RepeatedPtrFieldBase::UnsafeArenaReleaseLast() {
|
1831 |
+
GOOGLE_DCHECK_GT(current_size_, 0);
|
1832 |
+
typename TypeHandler::Type* result =
|
1833 |
+
cast<TypeHandler>(rep_->elements[--current_size_]);
|
1834 |
+
--rep_->allocated_size;
|
1835 |
+
if (current_size_ < rep_->allocated_size) {
|
1836 |
+
// There are cleared elements on the end; replace the removed element
|
1837 |
+
// with the last allocated element.
|
1838 |
+
rep_->elements[current_size_] = rep_->elements[rep_->allocated_size];
|
1839 |
+
}
|
1840 |
+
return result;
|
1841 |
+
}
|
1842 |
+
|
1843 |
+
inline int RepeatedPtrFieldBase::ClearedCount() const {
|
1844 |
+
return rep_ ? (rep_->allocated_size - current_size_) : 0;
|
1845 |
+
}
|
1846 |
+
|
1847 |
+
template <typename TypeHandler>
|
1848 |
+
inline void RepeatedPtrFieldBase::AddCleared(
|
1849 |
+
typename TypeHandler::Type* value) {
|
1850 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)
|
1851 |
+
<< "AddCleared() can only be used on a RepeatedPtrField not on an arena.";
|
1852 |
+
GOOGLE_DCHECK(TypeHandler::GetArena(value) == NULL)
|
1853 |
+
<< "AddCleared() can only accept values not on an arena.";
|
1854 |
+
if (!rep_ || rep_->allocated_size == total_size_) {
|
1855 |
+
Reserve(total_size_ + 1);
|
1856 |
+
}
|
1857 |
+
rep_->elements[rep_->allocated_size++] = value;
|
1858 |
+
}
|
1859 |
+
|
1860 |
+
template <typename TypeHandler>
|
1861 |
+
inline typename TypeHandler::Type* RepeatedPtrFieldBase::ReleaseCleared() {
|
1862 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)
|
1863 |
+
<< "ReleaseCleared() can only be used on a RepeatedPtrField not on "
|
1864 |
+
<< "an arena.";
|
1865 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == NULL);
|
1866 |
+
GOOGLE_DCHECK(rep_ != NULL);
|
1867 |
+
GOOGLE_DCHECK_GT(rep_->allocated_size, current_size_);
|
1868 |
+
return cast<TypeHandler>(rep_->elements[--rep_->allocated_size]);
|
1869 |
+
}
|
1870 |
+
|
1871 |
+
} // namespace internal
|
1872 |
+
|
1873 |
+
// -------------------------------------------------------------------
|
1874 |
+
|
1875 |
+
template <typename Element>
|
1876 |
+
class RepeatedPtrField<Element>::TypeHandler
|
1877 |
+
: public internal::GenericTypeHandler<Element> {
|
1878 |
+
};
|
1879 |
+
|
1880 |
+
template <>
|
1881 |
+
class RepeatedPtrField<string>::TypeHandler
|
1882 |
+
: public internal::StringTypeHandler {
|
1883 |
+
};
|
1884 |
+
|
1885 |
+
template <typename Element>
|
1886 |
+
inline RepeatedPtrField<Element>::RepeatedPtrField()
|
1887 |
+
: RepeatedPtrFieldBase() {}
|
1888 |
+
|
1889 |
+
template <typename Element>
|
1890 |
+
inline RepeatedPtrField<Element>::RepeatedPtrField(::google::protobuf::Arena* arena) :
|
1891 |
+
RepeatedPtrFieldBase(arena) {}
|
1892 |
+
|
1893 |
+
template <typename Element>
|
1894 |
+
inline RepeatedPtrField<Element>::RepeatedPtrField(
|
1895 |
+
const RepeatedPtrField& other)
|
1896 |
+
: RepeatedPtrFieldBase() {
|
1897 |
+
MergeFrom(other);
|
1898 |
+
}
|
1899 |
+
|
1900 |
+
template <typename Element>
|
1901 |
+
template <typename Iter>
|
1902 |
+
inline RepeatedPtrField<Element>::RepeatedPtrField(
|
1903 |
+
Iter begin, const Iter& end) {
|
1904 |
+
int reserve = internal::CalculateReserve(begin, end);
|
1905 |
+
if (reserve != -1) {
|
1906 |
+
Reserve(reserve);
|
1907 |
+
}
|
1908 |
+
for (; begin != end; ++begin) {
|
1909 |
+
*Add() = *begin;
|
1910 |
+
}
|
1911 |
+
}
|
1912 |
+
|
1913 |
+
template <typename Element>
|
1914 |
+
RepeatedPtrField<Element>::~RepeatedPtrField() {
|
1915 |
+
Destroy<TypeHandler>();
|
1916 |
+
}
|
1917 |
+
|
1918 |
+
template <typename Element>
|
1919 |
+
inline RepeatedPtrField<Element>& RepeatedPtrField<Element>::operator=(
|
1920 |
+
const RepeatedPtrField& other) {
|
1921 |
+
if (this != &other)
|
1922 |
+
CopyFrom(other);
|
1923 |
+
return *this;
|
1924 |
+
}
|
1925 |
+
|
1926 |
+
template <typename Element>
|
1927 |
+
inline RepeatedPtrField<Element>::RepeatedPtrField(
|
1928 |
+
RepeatedPtrField&& other) noexcept
|
1929 |
+
: RepeatedPtrField() {
|
1930 |
+
// We don't just call Swap(&other) here because it would perform 3 copies if
|
1931 |
+
// the two fields are on different arenas.
|
1932 |
+
if (other.GetArenaNoVirtual()) {
|
1933 |
+
CopyFrom(other);
|
1934 |
+
} else {
|
1935 |
+
InternalSwap(&other);
|
1936 |
+
}
|
1937 |
+
}
|
1938 |
+
|
1939 |
+
template <typename Element>
|
1940 |
+
inline RepeatedPtrField<Element>& RepeatedPtrField<Element>::operator=(
|
1941 |
+
RepeatedPtrField&& other) noexcept {
|
1942 |
+
// We don't just call Swap(&other) here because it would perform 3 copies if
|
1943 |
+
// the two fields are on different arenas.
|
1944 |
+
if (this != &other) {
|
1945 |
+
if (this->GetArenaNoVirtual() != other.GetArenaNoVirtual()) {
|
1946 |
+
CopyFrom(other);
|
1947 |
+
} else {
|
1948 |
+
InternalSwap(&other);
|
1949 |
+
}
|
1950 |
+
}
|
1951 |
+
return *this;
|
1952 |
+
}
|
1953 |
+
|
1954 |
+
template <typename Element>
|
1955 |
+
inline bool RepeatedPtrField<Element>::empty() const {
|
1956 |
+
return RepeatedPtrFieldBase::empty();
|
1957 |
+
}
|
1958 |
+
|
1959 |
+
template <typename Element>
|
1960 |
+
inline int RepeatedPtrField<Element>::size() const {
|
1961 |
+
return RepeatedPtrFieldBase::size();
|
1962 |
+
}
|
1963 |
+
|
1964 |
+
template <typename Element>
|
1965 |
+
inline const Element& RepeatedPtrField<Element>::Get(int index) const {
|
1966 |
+
return RepeatedPtrFieldBase::Get<TypeHandler>(index);
|
1967 |
+
}
|
1968 |
+
|
1969 |
+
|
1970 |
+
template <typename Element>
|
1971 |
+
inline Element* RepeatedPtrField<Element>::Mutable(int index) {
|
1972 |
+
return RepeatedPtrFieldBase::Mutable<TypeHandler>(index);
|
1973 |
+
}
|
1974 |
+
|
1975 |
+
template <typename Element>
|
1976 |
+
inline Element* RepeatedPtrField<Element>::Add() {
|
1977 |
+
return RepeatedPtrFieldBase::Add<TypeHandler>();
|
1978 |
+
}
|
1979 |
+
|
1980 |
+
template <typename Element>
|
1981 |
+
inline void RepeatedPtrField<Element>::Add(Element&& value) {
|
1982 |
+
RepeatedPtrFieldBase::Add<TypeHandler>(std::move(value));
|
1983 |
+
}
|
1984 |
+
|
1985 |
+
template <typename Element>
|
1986 |
+
inline void RepeatedPtrField<Element>::RemoveLast() {
|
1987 |
+
RepeatedPtrFieldBase::RemoveLast<TypeHandler>();
|
1988 |
+
}
|
1989 |
+
|
1990 |
+
template <typename Element>
|
1991 |
+
inline void RepeatedPtrField<Element>::DeleteSubrange(int start, int num) {
|
1992 |
+
GOOGLE_DCHECK_GE(start, 0);
|
1993 |
+
GOOGLE_DCHECK_GE(num, 0);
|
1994 |
+
GOOGLE_DCHECK_LE(start + num, size());
|
1995 |
+
for (int i = 0; i < num; ++i) {
|
1996 |
+
RepeatedPtrFieldBase::Delete<TypeHandler>(start + i);
|
1997 |
+
}
|
1998 |
+
ExtractSubrange(start, num, NULL);
|
1999 |
+
}
|
2000 |
+
|
2001 |
+
template <typename Element>
|
2002 |
+
inline void RepeatedPtrField<Element>::ExtractSubrange(
|
2003 |
+
int start, int num, Element** elements) {
|
2004 |
+
typename internal::TypeImplementsMergeBehavior<
|
2005 |
+
typename TypeHandler::Type>::type t;
|
2006 |
+
ExtractSubrangeInternal(start, num, elements, t);
|
2007 |
+
}
|
2008 |
+
|
2009 |
+
// ExtractSubrange() implementation for types that implement merge/copy
|
2010 |
+
// behavior.
|
2011 |
+
template <typename Element>
|
2012 |
+
inline void RepeatedPtrField<Element>::ExtractSubrangeInternal(
|
2013 |
+
int start, int num, Element** elements, std::true_type) {
|
2014 |
+
GOOGLE_DCHECK_GE(start, 0);
|
2015 |
+
GOOGLE_DCHECK_GE(num, 0);
|
2016 |
+
GOOGLE_DCHECK_LE(start + num, size());
|
2017 |
+
|
2018 |
+
if (num > 0) {
|
2019 |
+
// Save the values of the removed elements if requested.
|
2020 |
+
if (elements != NULL) {
|
2021 |
+
if (GetArenaNoVirtual() != NULL) {
|
2022 |
+
// If we're on an arena, we perform a copy for each element so that the
|
2023 |
+
// returned elements are heap-allocated.
|
2024 |
+
for (int i = 0; i < num; ++i) {
|
2025 |
+
Element* element = RepeatedPtrFieldBase::
|
2026 |
+
Mutable<TypeHandler>(i + start);
|
2027 |
+
typename TypeHandler::Type* new_value =
|
2028 |
+
TypeHandler::NewFromPrototype(element, NULL);
|
2029 |
+
TypeHandler::Merge(*element, new_value);
|
2030 |
+
elements[i] = new_value;
|
2031 |
+
}
|
2032 |
+
} else {
|
2033 |
+
for (int i = 0; i < num; ++i) {
|
2034 |
+
elements[i] = RepeatedPtrFieldBase::Mutable<TypeHandler>(i + start);
|
2035 |
+
}
|
2036 |
+
}
|
2037 |
+
}
|
2038 |
+
CloseGap(start, num);
|
2039 |
+
}
|
2040 |
+
}
|
2041 |
+
|
2042 |
+
// ExtractSubrange() implementation for types that do not implement merge/copy
|
2043 |
+
// behavior.
|
2044 |
+
template<typename Element>
|
2045 |
+
inline void RepeatedPtrField<Element>::ExtractSubrangeInternal(
|
2046 |
+
int start, int num, Element** elements, std::false_type) {
|
2047 |
+
// This case is identical to UnsafeArenaExtractSubrange(). However, since
|
2048 |
+
// ExtractSubrange() must return heap-allocated objects by contract, and we
|
2049 |
+
// cannot fulfill this contract if we are an on arena, we must GOOGLE_DCHECK() that
|
2050 |
+
// we are not on an arena.
|
2051 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)
|
2052 |
+
<< "ExtractSubrange() when arena is non-NULL is only supported when "
|
2053 |
+
<< "the Element type supplies a MergeFrom() operation to make copies.";
|
2054 |
+
UnsafeArenaExtractSubrange(start, num, elements);
|
2055 |
+
}
|
2056 |
+
|
2057 |
+
template <typename Element>
|
2058 |
+
inline void RepeatedPtrField<Element>::UnsafeArenaExtractSubrange(
|
2059 |
+
int start, int num, Element** elements) {
|
2060 |
+
GOOGLE_DCHECK_GE(start, 0);
|
2061 |
+
GOOGLE_DCHECK_GE(num, 0);
|
2062 |
+
GOOGLE_DCHECK_LE(start + num, size());
|
2063 |
+
|
2064 |
+
if (num > 0) {
|
2065 |
+
// Save the values of the removed elements if requested.
|
2066 |
+
if (elements != NULL) {
|
2067 |
+
for (int i = 0; i < num; ++i) {
|
2068 |
+
elements[i] = RepeatedPtrFieldBase::Mutable<TypeHandler>(i + start);
|
2069 |
+
}
|
2070 |
+
}
|
2071 |
+
CloseGap(start, num);
|
2072 |
+
}
|
2073 |
+
}
|
2074 |
+
|
2075 |
+
template <typename Element>
|
2076 |
+
inline void RepeatedPtrField<Element>::Clear() {
|
2077 |
+
RepeatedPtrFieldBase::Clear<TypeHandler>();
|
2078 |
+
}
|
2079 |
+
|
2080 |
+
template <typename Element>
|
2081 |
+
inline void RepeatedPtrField<Element>::MergeFrom(
|
2082 |
+
const RepeatedPtrField& other) {
|
2083 |
+
RepeatedPtrFieldBase::MergeFrom<TypeHandler>(other);
|
2084 |
+
}
|
2085 |
+
|
2086 |
+
template <typename Element>
|
2087 |
+
inline void RepeatedPtrField<Element>::CopyFrom(
|
2088 |
+
const RepeatedPtrField& other) {
|
2089 |
+
RepeatedPtrFieldBase::CopyFrom<TypeHandler>(other);
|
2090 |
+
}
|
2091 |
+
|
2092 |
+
template <typename Element>
|
2093 |
+
inline typename RepeatedPtrField<Element>::iterator
|
2094 |
+
RepeatedPtrField<Element>::erase(const_iterator position) {
|
2095 |
+
return erase(position, position + 1);
|
2096 |
+
}
|
2097 |
+
|
2098 |
+
template <typename Element>
|
2099 |
+
inline typename RepeatedPtrField<Element>::iterator
|
2100 |
+
RepeatedPtrField<Element>::erase(const_iterator first, const_iterator last) {
|
2101 |
+
size_type pos_offset = std::distance(cbegin(), first);
|
2102 |
+
size_type last_offset = std::distance(cbegin(), last);
|
2103 |
+
DeleteSubrange(pos_offset, last_offset - pos_offset);
|
2104 |
+
return begin() + pos_offset;
|
2105 |
+
}
|
2106 |
+
|
2107 |
+
template <typename Element>
|
2108 |
+
inline Element** RepeatedPtrField<Element>::mutable_data() {
|
2109 |
+
return RepeatedPtrFieldBase::mutable_data<TypeHandler>();
|
2110 |
+
}
|
2111 |
+
|
2112 |
+
template <typename Element>
|
2113 |
+
inline const Element* const* RepeatedPtrField<Element>::data() const {
|
2114 |
+
return RepeatedPtrFieldBase::data<TypeHandler>();
|
2115 |
+
}
|
2116 |
+
|
2117 |
+
template <typename Element>
|
2118 |
+
inline void RepeatedPtrField<Element>::Swap(RepeatedPtrField* other) {
|
2119 |
+
if (this == other)
|
2120 |
+
return;
|
2121 |
+
RepeatedPtrFieldBase::Swap<TypeHandler>(other);
|
2122 |
+
}
|
2123 |
+
|
2124 |
+
template <typename Element>
|
2125 |
+
inline void RepeatedPtrField<Element>::UnsafeArenaSwap(
|
2126 |
+
RepeatedPtrField* other) {
|
2127 |
+
if (this == other)
|
2128 |
+
return;
|
2129 |
+
RepeatedPtrFieldBase::InternalSwap(other);
|
2130 |
+
}
|
2131 |
+
|
2132 |
+
template <typename Element>
|
2133 |
+
inline void RepeatedPtrField<Element>::SwapElements(int index1, int index2) {
|
2134 |
+
RepeatedPtrFieldBase::SwapElements(index1, index2);
|
2135 |
+
}
|
2136 |
+
|
2137 |
+
template <typename Element>
|
2138 |
+
inline Arena* RepeatedPtrField<Element>::GetArenaNoVirtual() const {
|
2139 |
+
return RepeatedPtrFieldBase::GetArenaNoVirtual();
|
2140 |
+
}
|
2141 |
+
|
2142 |
+
template <typename Element>
|
2143 |
+
inline size_t RepeatedPtrField<Element>::SpaceUsedExcludingSelfLong() const {
|
2144 |
+
return RepeatedPtrFieldBase::SpaceUsedExcludingSelfLong<TypeHandler>();
|
2145 |
+
}
|
2146 |
+
|
2147 |
+
template <typename Element>
|
2148 |
+
inline void RepeatedPtrField<Element>::AddAllocated(Element* value) {
|
2149 |
+
RepeatedPtrFieldBase::AddAllocated<TypeHandler>(value);
|
2150 |
+
}
|
2151 |
+
|
2152 |
+
template <typename Element>
|
2153 |
+
inline void RepeatedPtrField<Element>::UnsafeArenaAddAllocated(Element* value) {
|
2154 |
+
RepeatedPtrFieldBase::UnsafeArenaAddAllocated<TypeHandler>(value);
|
2155 |
+
}
|
2156 |
+
|
2157 |
+
template <typename Element>
|
2158 |
+
inline Element* RepeatedPtrField<Element>::ReleaseLast() {
|
2159 |
+
return RepeatedPtrFieldBase::ReleaseLast<TypeHandler>();
|
2160 |
+
}
|
2161 |
+
|
2162 |
+
template <typename Element>
|
2163 |
+
inline Element* RepeatedPtrField<Element>::UnsafeArenaReleaseLast() {
|
2164 |
+
return RepeatedPtrFieldBase::UnsafeArenaReleaseLast<TypeHandler>();
|
2165 |
+
}
|
2166 |
+
|
2167 |
+
template <typename Element>
|
2168 |
+
inline int RepeatedPtrField<Element>::ClearedCount() const {
|
2169 |
+
return RepeatedPtrFieldBase::ClearedCount();
|
2170 |
+
}
|
2171 |
+
|
2172 |
+
template <typename Element>
|
2173 |
+
inline void RepeatedPtrField<Element>::AddCleared(Element* value) {
|
2174 |
+
return RepeatedPtrFieldBase::AddCleared<TypeHandler>(value);
|
2175 |
+
}
|
2176 |
+
|
2177 |
+
template <typename Element>
|
2178 |
+
inline Element* RepeatedPtrField<Element>::ReleaseCleared() {
|
2179 |
+
return RepeatedPtrFieldBase::ReleaseCleared<TypeHandler>();
|
2180 |
+
}
|
2181 |
+
|
2182 |
+
template <typename Element>
|
2183 |
+
inline void RepeatedPtrField<Element>::Reserve(int new_size) {
|
2184 |
+
return RepeatedPtrFieldBase::Reserve(new_size);
|
2185 |
+
}
|
2186 |
+
|
2187 |
+
template <typename Element>
|
2188 |
+
inline int RepeatedPtrField<Element>::Capacity() const {
|
2189 |
+
return RepeatedPtrFieldBase::Capacity();
|
2190 |
+
}
|
2191 |
+
|
2192 |
+
// -------------------------------------------------------------------
|
2193 |
+
|
2194 |
+
namespace internal {
|
2195 |
+
|
2196 |
+
// STL-like iterator implementation for RepeatedPtrField. You should not
|
2197 |
+
// refer to this class directly; use RepeatedPtrField<T>::iterator instead.
|
2198 |
+
//
|
2199 |
+
// The iterator for RepeatedPtrField<T>, RepeatedPtrIterator<T>, is
|
2200 |
+
// very similar to iterator_ptr<T**> in util/gtl/iterator_adaptors.h,
|
2201 |
+
// but adds random-access operators and is modified to wrap a void** base
|
2202 |
+
// iterator (since RepeatedPtrField stores its array as a void* array and
|
2203 |
+
// casting void** to T** would violate C++ aliasing rules).
|
2204 |
+
//
|
2205 |
+
// This code based on net/proto/proto-array-internal.h by Jeffrey Yasskin
|
2206 |
+
// ([email protected]).
|
2207 |
+
template<typename Element>
|
2208 |
+
class RepeatedPtrIterator
|
2209 |
+
: public std::iterator<
|
2210 |
+
std::random_access_iterator_tag, Element> {
|
2211 |
+
public:
|
2212 |
+
typedef RepeatedPtrIterator<Element> iterator;
|
2213 |
+
typedef std::iterator<
|
2214 |
+
std::random_access_iterator_tag, Element> superclass;
|
2215 |
+
|
2216 |
+
// Shadow the value_type in std::iterator<> because const_iterator::value_type
|
2217 |
+
// needs to be T, not const T.
|
2218 |
+
typedef typename std::remove_const<Element>::type value_type;
|
2219 |
+
|
2220 |
+
// Let the compiler know that these are type names, so we don't have to
|
2221 |
+
// write "typename" in front of them everywhere.
|
2222 |
+
typedef typename superclass::reference reference;
|
2223 |
+
typedef typename superclass::pointer pointer;
|
2224 |
+
typedef typename superclass::difference_type difference_type;
|
2225 |
+
|
2226 |
+
RepeatedPtrIterator() : it_(NULL) {}
|
2227 |
+
explicit RepeatedPtrIterator(void* const* it) : it_(it) {}
|
2228 |
+
|
2229 |
+
// Allow "upcasting" from RepeatedPtrIterator<T**> to
|
2230 |
+
// RepeatedPtrIterator<const T*const*>.
|
2231 |
+
template<typename OtherElement>
|
2232 |
+
RepeatedPtrIterator(const RepeatedPtrIterator<OtherElement>& other)
|
2233 |
+
: it_(other.it_) {
|
2234 |
+
// Force a compiler error if the other type is not convertible to ours.
|
2235 |
+
if (false) {
|
2236 |
+
implicit_cast<Element*>(static_cast<OtherElement*>(nullptr));
|
2237 |
+
}
|
2238 |
+
}
|
2239 |
+
|
2240 |
+
// dereferenceable
|
2241 |
+
reference operator*() const { return *reinterpret_cast<Element*>(*it_); }
|
2242 |
+
pointer operator->() const { return &(operator*()); }
|
2243 |
+
|
2244 |
+
// {inc,dec}rementable
|
2245 |
+
iterator& operator++() { ++it_; return *this; }
|
2246 |
+
iterator operator++(int) { return iterator(it_++); }
|
2247 |
+
iterator& operator--() { --it_; return *this; }
|
2248 |
+
iterator operator--(int) { return iterator(it_--); }
|
2249 |
+
|
2250 |
+
// equality_comparable
|
2251 |
+
bool operator==(const iterator& x) const { return it_ == x.it_; }
|
2252 |
+
bool operator!=(const iterator& x) const { return it_ != x.it_; }
|
2253 |
+
|
2254 |
+
// less_than_comparable
|
2255 |
+
bool operator<(const iterator& x) const { return it_ < x.it_; }
|
2256 |
+
bool operator<=(const iterator& x) const { return it_ <= x.it_; }
|
2257 |
+
bool operator>(const iterator& x) const { return it_ > x.it_; }
|
2258 |
+
bool operator>=(const iterator& x) const { return it_ >= x.it_; }
|
2259 |
+
|
2260 |
+
// addable, subtractable
|
2261 |
+
iterator& operator+=(difference_type d) {
|
2262 |
+
it_ += d;
|
2263 |
+
return *this;
|
2264 |
+
}
|
2265 |
+
friend iterator operator+(iterator it, const difference_type d) {
|
2266 |
+
it += d;
|
2267 |
+
return it;
|
2268 |
+
}
|
2269 |
+
friend iterator operator+(const difference_type d, iterator it) {
|
2270 |
+
it += d;
|
2271 |
+
return it;
|
2272 |
+
}
|
2273 |
+
iterator& operator-=(difference_type d) {
|
2274 |
+
it_ -= d;
|
2275 |
+
return *this;
|
2276 |
+
}
|
2277 |
+
friend iterator operator-(iterator it, difference_type d) {
|
2278 |
+
it -= d;
|
2279 |
+
return it;
|
2280 |
+
}
|
2281 |
+
|
2282 |
+
// indexable
|
2283 |
+
reference operator[](difference_type d) const { return *(*this + d); }
|
2284 |
+
|
2285 |
+
// random access iterator
|
2286 |
+
difference_type operator-(const iterator& x) const { return it_ - x.it_; }
|
2287 |
+
|
2288 |
+
private:
|
2289 |
+
template<typename OtherElement>
|
2290 |
+
friend class RepeatedPtrIterator;
|
2291 |
+
|
2292 |
+
// The internal iterator.
|
2293 |
+
void* const* it_;
|
2294 |
+
};
|
2295 |
+
|
2296 |
+
// Provide an iterator that operates on pointers to the underlying objects
|
2297 |
+
// rather than the objects themselves as RepeatedPtrIterator does.
|
2298 |
+
// Consider using this when working with stl algorithms that change
|
2299 |
+
// the array.
|
2300 |
+
// The VoidPtr template parameter holds the type-agnostic pointer value
|
2301 |
+
// referenced by the iterator. It should either be "void *" for a mutable
|
2302 |
+
// iterator, or "const void* const" for a constant iterator.
|
2303 |
+
template <typename Element, typename VoidPtr>
|
2304 |
+
class RepeatedPtrOverPtrsIterator
|
2305 |
+
: public std::iterator<std::random_access_iterator_tag, Element> {
|
2306 |
+
public:
|
2307 |
+
typedef RepeatedPtrOverPtrsIterator<Element, VoidPtr> iterator;
|
2308 |
+
typedef std::iterator<std::random_access_iterator_tag, Element> superclass;
|
2309 |
+
|
2310 |
+
// Shadow the value_type in std::iterator<> because const_iterator::value_type
|
2311 |
+
// needs to be T, not const T.
|
2312 |
+
typedef typename std::remove_const<Element>::type value_type;
|
2313 |
+
|
2314 |
+
// Let the compiler know that these are type names, so we don't have to
|
2315 |
+
// write "typename" in front of them everywhere.
|
2316 |
+
typedef typename superclass::reference reference;
|
2317 |
+
typedef typename superclass::pointer pointer;
|
2318 |
+
typedef typename superclass::difference_type difference_type;
|
2319 |
+
|
2320 |
+
RepeatedPtrOverPtrsIterator() : it_(NULL) {}
|
2321 |
+
explicit RepeatedPtrOverPtrsIterator(VoidPtr* it) : it_(it) {}
|
2322 |
+
|
2323 |
+
// dereferenceable
|
2324 |
+
reference operator*() const { return *reinterpret_cast<Element*>(it_); }
|
2325 |
+
pointer operator->() const { return &(operator*()); }
|
2326 |
+
|
2327 |
+
// {inc,dec}rementable
|
2328 |
+
iterator& operator++() { ++it_; return *this; }
|
2329 |
+
iterator operator++(int) { return iterator(it_++); }
|
2330 |
+
iterator& operator--() { --it_; return *this; }
|
2331 |
+
iterator operator--(int) { return iterator(it_--); }
|
2332 |
+
|
2333 |
+
// equality_comparable
|
2334 |
+
bool operator==(const iterator& x) const { return it_ == x.it_; }
|
2335 |
+
bool operator!=(const iterator& x) const { return it_ != x.it_; }
|
2336 |
+
|
2337 |
+
// less_than_comparable
|
2338 |
+
bool operator<(const iterator& x) const { return it_ < x.it_; }
|
2339 |
+
bool operator<=(const iterator& x) const { return it_ <= x.it_; }
|
2340 |
+
bool operator>(const iterator& x) const { return it_ > x.it_; }
|
2341 |
+
bool operator>=(const iterator& x) const { return it_ >= x.it_; }
|
2342 |
+
|
2343 |
+
// addable, subtractable
|
2344 |
+
iterator& operator+=(difference_type d) {
|
2345 |
+
it_ += d;
|
2346 |
+
return *this;
|
2347 |
+
}
|
2348 |
+
friend iterator operator+(iterator it, difference_type d) {
|
2349 |
+
it += d;
|
2350 |
+
return it;
|
2351 |
+
}
|
2352 |
+
friend iterator operator+(difference_type d, iterator it) {
|
2353 |
+
it += d;
|
2354 |
+
return it;
|
2355 |
+
}
|
2356 |
+
iterator& operator-=(difference_type d) {
|
2357 |
+
it_ -= d;
|
2358 |
+
return *this;
|
2359 |
+
}
|
2360 |
+
friend iterator operator-(iterator it, difference_type d) {
|
2361 |
+
it -= d;
|
2362 |
+
return it;
|
2363 |
+
}
|
2364 |
+
|
2365 |
+
// indexable
|
2366 |
+
reference operator[](difference_type d) const { return *(*this + d); }
|
2367 |
+
|
2368 |
+
// random access iterator
|
2369 |
+
difference_type operator-(const iterator& x) const { return it_ - x.it_; }
|
2370 |
+
|
2371 |
+
private:
|
2372 |
+
template<typename OtherElement>
|
2373 |
+
friend class RepeatedPtrIterator;
|
2374 |
+
|
2375 |
+
// The internal iterator.
|
2376 |
+
VoidPtr* it_;
|
2377 |
+
};
|
2378 |
+
|
2379 |
+
void RepeatedPtrFieldBase::InternalSwap(RepeatedPtrFieldBase* other) {
|
2380 |
+
GOOGLE_DCHECK(this != other);
|
2381 |
+
GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());
|
2382 |
+
|
2383 |
+
std::swap(rep_, other->rep_);
|
2384 |
+
std::swap(current_size_, other->current_size_);
|
2385 |
+
std::swap(total_size_, other->total_size_);
|
2386 |
+
}
|
2387 |
+
|
2388 |
+
} // namespace internal
|
2389 |
+
|
2390 |
+
template <typename Element>
|
2391 |
+
inline typename RepeatedPtrField<Element>::iterator
|
2392 |
+
RepeatedPtrField<Element>::begin() {
|
2393 |
+
return iterator(raw_data());
|
2394 |
+
}
|
2395 |
+
template <typename Element>
|
2396 |
+
inline typename RepeatedPtrField<Element>::const_iterator
|
2397 |
+
RepeatedPtrField<Element>::begin() const {
|
2398 |
+
return iterator(raw_data());
|
2399 |
+
}
|
2400 |
+
template <typename Element>
|
2401 |
+
inline typename RepeatedPtrField<Element>::const_iterator
|
2402 |
+
RepeatedPtrField<Element>::cbegin() const {
|
2403 |
+
return begin();
|
2404 |
+
}
|
2405 |
+
template <typename Element>
|
2406 |
+
inline typename RepeatedPtrField<Element>::iterator
|
2407 |
+
RepeatedPtrField<Element>::end() {
|
2408 |
+
return iterator(raw_data() + size());
|
2409 |
+
}
|
2410 |
+
template <typename Element>
|
2411 |
+
inline typename RepeatedPtrField<Element>::const_iterator
|
2412 |
+
RepeatedPtrField<Element>::end() const {
|
2413 |
+
return iterator(raw_data() + size());
|
2414 |
+
}
|
2415 |
+
template <typename Element>
|
2416 |
+
inline typename RepeatedPtrField<Element>::const_iterator
|
2417 |
+
RepeatedPtrField<Element>::cend() const {
|
2418 |
+
return end();
|
2419 |
+
}
|
2420 |
+
|
2421 |
+
template <typename Element>
|
2422 |
+
inline typename RepeatedPtrField<Element>::pointer_iterator
|
2423 |
+
RepeatedPtrField<Element>::pointer_begin() {
|
2424 |
+
return pointer_iterator(raw_mutable_data());
|
2425 |
+
}
|
2426 |
+
template <typename Element>
|
2427 |
+
inline typename RepeatedPtrField<Element>::const_pointer_iterator
|
2428 |
+
RepeatedPtrField<Element>::pointer_begin() const {
|
2429 |
+
return const_pointer_iterator(const_cast<const void* const*>(raw_data()));
|
2430 |
+
}
|
2431 |
+
template <typename Element>
|
2432 |
+
inline typename RepeatedPtrField<Element>::pointer_iterator
|
2433 |
+
RepeatedPtrField<Element>::pointer_end() {
|
2434 |
+
return pointer_iterator(raw_mutable_data() + size());
|
2435 |
+
}
|
2436 |
+
template <typename Element>
|
2437 |
+
inline typename RepeatedPtrField<Element>::const_pointer_iterator
|
2438 |
+
RepeatedPtrField<Element>::pointer_end() const {
|
2439 |
+
return const_pointer_iterator(
|
2440 |
+
const_cast<const void* const*>(raw_data() + size()));
|
2441 |
+
}
|
2442 |
+
|
2443 |
+
|
2444 |
+
// Iterators and helper functions that follow the spirit of the STL
|
2445 |
+
// std::back_insert_iterator and std::back_inserter but are tailor-made
|
2446 |
+
// for RepeatedField and RepeatedPtrField. Typical usage would be:
|
2447 |
+
//
|
2448 |
+
// std::copy(some_sequence.begin(), some_sequence.end(),
|
2449 |
+
// google::protobuf::RepeatedFieldBackInserter(proto.mutable_sequence()));
|
2450 |
+
//
|
2451 |
+
// Ported by johannes from util/gtl/proto-array-iterators.h
|
2452 |
+
|
2453 |
+
namespace internal {
|
2454 |
+
// A back inserter for RepeatedField objects.
|
2455 |
+
template<typename T> class RepeatedFieldBackInsertIterator
|
2456 |
+
: public std::iterator<std::output_iterator_tag, T> {
|
2457 |
+
public:
|
2458 |
+
explicit RepeatedFieldBackInsertIterator(
|
2459 |
+
RepeatedField<T>* const mutable_field)
|
2460 |
+
: field_(mutable_field) {
|
2461 |
+
}
|
2462 |
+
RepeatedFieldBackInsertIterator<T>& operator=(const T& value) {
|
2463 |
+
field_->Add(value);
|
2464 |
+
return *this;
|
2465 |
+
}
|
2466 |
+
RepeatedFieldBackInsertIterator<T>& operator*() {
|
2467 |
+
return *this;
|
2468 |
+
}
|
2469 |
+
RepeatedFieldBackInsertIterator<T>& operator++() {
|
2470 |
+
return *this;
|
2471 |
+
}
|
2472 |
+
RepeatedFieldBackInsertIterator<T>& operator++(int /* unused */) {
|
2473 |
+
return *this;
|
2474 |
+
}
|
2475 |
+
|
2476 |
+
private:
|
2477 |
+
RepeatedField<T>* field_;
|
2478 |
+
};
|
2479 |
+
|
2480 |
+
// A back inserter for RepeatedPtrField objects.
|
2481 |
+
template<typename T> class RepeatedPtrFieldBackInsertIterator
|
2482 |
+
: public std::iterator<std::output_iterator_tag, T> {
|
2483 |
+
public:
|
2484 |
+
RepeatedPtrFieldBackInsertIterator(
|
2485 |
+
RepeatedPtrField<T>* const mutable_field)
|
2486 |
+
: field_(mutable_field) {
|
2487 |
+
}
|
2488 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator=(const T& value) {
|
2489 |
+
*field_->Add() = value;
|
2490 |
+
return *this;
|
2491 |
+
}
|
2492 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator=(
|
2493 |
+
const T* const ptr_to_value) {
|
2494 |
+
*field_->Add() = *ptr_to_value;
|
2495 |
+
return *this;
|
2496 |
+
}
|
2497 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator=(T&& value) {
|
2498 |
+
*field_->Add() = std::move(value);
|
2499 |
+
return *this;
|
2500 |
+
}
|
2501 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator*() {
|
2502 |
+
return *this;
|
2503 |
+
}
|
2504 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator++() {
|
2505 |
+
return *this;
|
2506 |
+
}
|
2507 |
+
RepeatedPtrFieldBackInsertIterator<T>& operator++(int /* unused */) {
|
2508 |
+
return *this;
|
2509 |
+
}
|
2510 |
+
|
2511 |
+
private:
|
2512 |
+
RepeatedPtrField<T>* field_;
|
2513 |
+
};
|
2514 |
+
|
2515 |
+
// A back inserter for RepeatedPtrFields that inserts by transferring ownership
|
2516 |
+
// of a pointer.
|
2517 |
+
template<typename T> class AllocatedRepeatedPtrFieldBackInsertIterator
|
2518 |
+
: public std::iterator<std::output_iterator_tag, T> {
|
2519 |
+
public:
|
2520 |
+
explicit AllocatedRepeatedPtrFieldBackInsertIterator(
|
2521 |
+
RepeatedPtrField<T>* const mutable_field)
|
2522 |
+
: field_(mutable_field) {
|
2523 |
+
}
|
2524 |
+
AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator=(
|
2525 |
+
T* const ptr_to_value) {
|
2526 |
+
field_->AddAllocated(ptr_to_value);
|
2527 |
+
return *this;
|
2528 |
+
}
|
2529 |
+
AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator*() {
|
2530 |
+
return *this;
|
2531 |
+
}
|
2532 |
+
AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++() {
|
2533 |
+
return *this;
|
2534 |
+
}
|
2535 |
+
AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++(
|
2536 |
+
int /* unused */) {
|
2537 |
+
return *this;
|
2538 |
+
}
|
2539 |
+
|
2540 |
+
private:
|
2541 |
+
RepeatedPtrField<T>* field_;
|
2542 |
+
};
|
2543 |
+
|
2544 |
+
// Almost identical to AllocatedRepeatedPtrFieldBackInsertIterator. This one
|
2545 |
+
// uses the UnsafeArenaAddAllocated instead.
|
2546 |
+
template<typename T>
|
2547 |
+
class UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator
|
2548 |
+
: public std::iterator<std::output_iterator_tag, T> {
|
2549 |
+
public:
|
2550 |
+
explicit UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator(
|
2551 |
+
::google::protobuf::RepeatedPtrField<T>* const mutable_field)
|
2552 |
+
: field_(mutable_field) {
|
2553 |
+
}
|
2554 |
+
UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator=(
|
2555 |
+
T const* const ptr_to_value) {
|
2556 |
+
field_->UnsafeArenaAddAllocated(const_cast<T*>(ptr_to_value));
|
2557 |
+
return *this;
|
2558 |
+
}
|
2559 |
+
UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator*() {
|
2560 |
+
return *this;
|
2561 |
+
}
|
2562 |
+
UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++() {
|
2563 |
+
return *this;
|
2564 |
+
}
|
2565 |
+
UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++(
|
2566 |
+
int /* unused */) {
|
2567 |
+
return *this;
|
2568 |
+
}
|
2569 |
+
|
2570 |
+
private:
|
2571 |
+
::google::protobuf::RepeatedPtrField<T>* field_;
|
2572 |
+
};
|
2573 |
+
|
2574 |
+
} // namespace internal
|
2575 |
+
|
2576 |
+
// Provides a back insert iterator for RepeatedField instances,
|
2577 |
+
// similar to std::back_inserter().
|
2578 |
+
// Provides a back insert iterator for RepeatedField instances,
// similar to std::back_inserter(). The field must outlive the iterator.
template<typename T> internal::RepeatedFieldBackInsertIterator<T>
RepeatedFieldBackInserter(RepeatedField<T>* const mutable_field) {
  return internal::RepeatedFieldBackInsertIterator<T>(mutable_field);
}
|
2582 |
+
|
2583 |
+
// Provides a back insert iterator for RepeatedPtrField instances,
|
2584 |
+
// similar to std::back_inserter().
|
2585 |
+
// Provides a back insert iterator for RepeatedPtrField instances,
// similar to std::back_inserter(). The field must outlive the iterator.
template<typename T> internal::RepeatedPtrFieldBackInsertIterator<T>
RepeatedPtrFieldBackInserter(RepeatedPtrField<T>* const mutable_field) {
  return internal::RepeatedPtrFieldBackInsertIterator<T>(mutable_field);
}
|
2589 |
+
|
2590 |
+
// Special back insert iterator for RepeatedPtrField instances, just in
|
2591 |
+
// case someone wants to write generic template code that can access both
|
2592 |
+
// RepeatedFields and RepeatedPtrFields using a common name.
|
2593 |
+
// Special back insert iterator for RepeatedPtrField instances, just in
// case someone wants to write generic template code that can access both
// RepeatedFields and RepeatedPtrFields using a common name.
template<typename T> internal::RepeatedPtrFieldBackInsertIterator<T>
RepeatedFieldBackInserter(RepeatedPtrField<T>* const mutable_field) {
  return internal::RepeatedPtrFieldBackInsertIterator<T>(mutable_field);
}
|
2597 |
+
|
2598 |
+
// Provides a back insert iterator for RepeatedPtrField instances
|
2599 |
+
// similar to std::back_inserter() which transfers the ownership while
|
2600 |
+
// copying elements.
|
2601 |
+
// Provides a back insert iterator for RepeatedPtrField instances
// similar to std::back_inserter() which transfers the ownership while
// copying elements: the field takes ownership of each assigned pointer.
template<typename T> internal::AllocatedRepeatedPtrFieldBackInsertIterator<T>
AllocatedRepeatedPtrFieldBackInserter(
    RepeatedPtrField<T>* const mutable_field) {
  return internal::AllocatedRepeatedPtrFieldBackInsertIterator<T>(
      mutable_field);
}
|
2607 |
+
|
2608 |
+
// Similar to AllocatedRepeatedPtrFieldBackInserter, using
|
2609 |
+
// UnsafeArenaAddAllocated instead of AddAllocated.
|
2610 |
+
// This is slightly faster if that matters. It is also useful in legacy code
|
2611 |
+
// that uses temporary ownership to avoid copies. Example:
|
2612 |
+
// RepeatedPtrField<T> temp_field;
|
2613 |
+
// temp_field.AddAllocated(new T);
|
2614 |
+
// ... // Do something with temp_field
|
2615 |
+
// temp_field.ExtractSubrange(0, temp_field.size(), nullptr);
|
2616 |
+
// If you put temp_field on the arena this fails, because the ownership
|
2617 |
+
// transfers to the arena at the "AddAllocated" call and is not released anymore
|
2618 |
+
// causing a double delete. Using UnsafeArenaAddAllocated prevents this.
|
2619 |
+
// Similar to AllocatedRepeatedPtrFieldBackInserter, but uses
// UnsafeArenaAddAllocated instead of AddAllocated: the pointer is stored
// without the arena taking ownership, which avoids the double-delete
// described in the header comments when elements are only temporarily
// owned by the field.
template<typename T>
internal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>
UnsafeArenaAllocatedRepeatedPtrFieldBackInserter(
    ::google::protobuf::RepeatedPtrField<T>* const mutable_field) {
  return internal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>(
      mutable_field);
}
|
2626 |
+
|
2627 |
+
} // namespace protobuf
|
2628 |
+
|
2629 |
+
} // namespace google
|
2630 |
+
#endif // GOOGLE_PROTOBUF_REPEATED_FIELD_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/bytestream.h
ADDED
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// This file declares the ByteSink and ByteSource abstract interfaces. These
|
32 |
+
// interfaces represent objects that consume (ByteSink) or produce (ByteSource)
|
33 |
+
// a sequence of bytes. Using these abstract interfaces in your APIs can help
|
34 |
+
// make your code work with a variety of input and output types.
|
35 |
+
//
|
36 |
+
// This file also declares the following commonly used implementations of these
|
37 |
+
// interfaces.
|
38 |
+
//
|
39 |
+
// ByteSink:
|
40 |
+
// UncheckedArrayByteSink Writes to an array, without bounds checking
|
41 |
+
// CheckedArrayByteSink Writes to an array, with bounds checking
|
42 |
+
// GrowingArrayByteSink Allocates and writes to a growable buffer
|
43 |
+
// StringByteSink Writes to an STL string
|
44 |
+
// NullByteSink Consumes a never-ending stream of bytes
|
45 |
+
//
|
46 |
+
// ByteSource:
|
47 |
+
// ArrayByteSource Reads from an array or string/StringPiece
|
48 |
+
// LimitByteSource Reads a limited number of bytes from an underlying source
|
49 |
+
|
50 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
|
51 |
+
#define GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
|
52 |
+
|
53 |
+
#include <stddef.h>
|
54 |
+
#include <string>
|
55 |
+
|
56 |
+
#include <google/protobuf/stubs/common.h>
|
57 |
+
#include <google/protobuf/stubs/stringpiece.h>
|
58 |
+
|
59 |
+
class CordByteSink;
|
60 |
+
class MemBlock;
|
61 |
+
|
62 |
+
namespace google {
|
63 |
+
namespace protobuf {
|
64 |
+
namespace strings {
|
65 |
+
|
66 |
+
// An abstract interface for an object that consumes a sequence of bytes. This
|
67 |
+
// interface offers a way to append data as well as a Flush() function.
|
68 |
+
//
|
69 |
+
// Example:
|
70 |
+
//
|
71 |
+
// string my_data;
|
72 |
+
// ...
|
73 |
+
// ByteSink* sink = ...
|
74 |
+
// sink->Append(my_data.data(), my_data.size());
|
75 |
+
// sink->Flush();
|
76 |
+
//
|
77 |
+
// An abstract interface for an object that consumes a sequence of bytes.
// Concrete implementations are declared below; methods without inline
// bodies are defined in bytestream.cc.
class LIBPROTOBUF_EXPORT ByteSink {
 public:
  ByteSink() {}
  virtual ~ByteSink() {}

  // Appends the "n" bytes starting at "bytes".
  virtual void Append(const char* bytes, size_t n) = 0;

  // Flushes internal buffers. The default implementation does nothing. ByteSink
  // subclasses may use internal buffers that require calling Flush() at the end
  // of the stream.
  virtual void Flush();

 private:
  // Sinks are identity objects; copying one is almost certainly a bug.
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSink);
};
|
93 |
+
|
94 |
+
// An abstract interface for an object that produces a fixed-size sequence of
|
95 |
+
// bytes.
|
96 |
+
//
|
97 |
+
// Example:
|
98 |
+
//
|
99 |
+
// ByteSource* source = ...
|
100 |
+
// while (source->Available() > 0) {
|
101 |
+
// StringPiece data = source->Peek();
|
102 |
+
// ... do something with "data" ...
|
103 |
+
// source->Skip(data.length());
|
104 |
+
// }
|
105 |
+
//
|
106 |
+
// An abstract interface for an object that produces a fixed-size sequence
// of bytes. See the usage example in the header comment above.
class LIBPROTOBUF_EXPORT ByteSource {
 public:
  ByteSource() {}
  virtual ~ByteSource() {}

  // Returns the number of bytes left to read from the source. Available()
  // should decrease by N each time Skip(N) is called. Available() may not
  // increase. Available() returning 0 indicates that the ByteSource is
  // exhausted.
  //
  // Note: Size() may have been a more appropriate name as it's more
  // indicative of the fixed-size nature of a ByteSource.
  virtual size_t Available() const = 0;

  // Returns a StringPiece of the next contiguous region of the source. Does not
  // reposition the source. The returned region is empty iff Available() == 0.
  //
  // The returned region is valid until the next call to Skip() or until this
  // object is destroyed, whichever occurs first.
  //
  // The length of the returned StringPiece will be <= Available().
  virtual StringPiece Peek() = 0;

  // Skips the next n bytes. Invalidates any StringPiece returned by a previous
  // call to Peek().
  //
  // REQUIRES: Available() >= n
  virtual void Skip(size_t n) = 0;

  // Writes the next n bytes in this ByteSource to the given ByteSink, and
  // advances this ByteSource past the copied bytes. The default implementation
  // of this method just copies the bytes normally, but subclasses might
  // override CopyTo to optimize certain cases.
  //
  // REQUIRES: Available() >= n
  virtual void CopyTo(ByteSink* sink, size_t n);

 private:
  // Sources carry read-position state; copying one is almost certainly a bug.
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSource);
};
|
146 |
+
|
147 |
+
//
|
148 |
+
// Some commonly used implementations of ByteSink
|
149 |
+
//
|
150 |
+
|
151 |
+
// Implementation of ByteSink that writes to an unsized byte array. No
|
152 |
+
// bounds-checking is performed--it is the caller's responsibility to ensure
|
153 |
+
// that the destination array is large enough.
|
154 |
+
//
|
155 |
+
// Example:
|
156 |
+
//
|
157 |
+
// char buf[10];
|
158 |
+
// UncheckedArrayByteSink sink(buf);
|
159 |
+
// sink.Append("hi", 2); // OK
|
160 |
+
// sink.Append(data, 100); // WOOPS! Overflows buf[10].
|
161 |
+
//
|
162 |
+
// ByteSink that writes to a caller-supplied byte array with NO bounds
// checking; the caller must guarantee the destination is large enough.
class LIBPROTOBUF_EXPORT UncheckedArrayByteSink : public ByteSink {
 public:
  explicit UncheckedArrayByteSink(char* dest) : dest_(dest) {}
  virtual void Append(const char* data, size_t n);

  // Returns the current output pointer so that a caller can see how many bytes
  // were produced.
  //
  // Note: this method is not part of the ByteSink interface.
  char* CurrentDestination() const { return dest_; }

 private:
  char* dest_;  // Current write position in the caller's buffer (not owned).
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(UncheckedArrayByteSink);
};
|
177 |
+
|
178 |
+
// Implementation of ByteSink that writes to a sized byte array. This sink will
|
179 |
+
// not write more than "capacity" bytes to outbuf. Once "capacity" bytes are
|
180 |
+
// appended, subsequent bytes will be ignored and Overflowed() will return true.
|
181 |
+
// Overflowed() does not cause a runtime error (i.e., it does not CHECK fail).
|
182 |
+
//
|
183 |
+
// Example:
|
184 |
+
//
|
185 |
+
// char buf[10];
|
186 |
+
// CheckedArrayByteSink sink(buf, 10);
|
187 |
+
// sink.Append("hi", 2); // OK
|
188 |
+
// sink.Append(data, 100); // Will only write 8 more bytes
|
189 |
+
//
|
190 |
+
// ByteSink that writes to a sized byte array, silently discarding anything
// beyond "capacity" bytes. Overflowed() reports whether truncation occurred;
// truncation is not a runtime error.
class LIBPROTOBUF_EXPORT CheckedArrayByteSink : public ByteSink {
 public:
  CheckedArrayByteSink(char* outbuf, size_t capacity);
  virtual void Append(const char* bytes, size_t n);

  // Returns the number of bytes actually written to the sink.
  size_t NumberOfBytesWritten() const { return size_; }

  // Returns true if any bytes were discarded, i.e., if there was an
  // attempt to write more than 'capacity' bytes.
  bool Overflowed() const { return overflowed_; }

 private:
  char* outbuf_;           // Destination buffer (not owned).
  const size_t capacity_;  // Maximum bytes the buffer may receive.
  size_t size_;            // Bytes written so far.
  bool overflowed_;        // True once an Append exceeded capacity_.
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CheckedArrayByteSink);
};
|
209 |
+
|
210 |
+
// Implementation of ByteSink that allocates an internal buffer (a char array)
|
211 |
+
// and expands it as needed to accommodate appended data (similar to a string),
|
212 |
+
// and allows the caller to take ownership of the internal buffer via the
|
213 |
+
// GetBuffer() method. The buffer returned from GetBuffer() must be deleted by
|
214 |
+
// the caller with delete[]. GetBuffer() also sets the internal buffer to be
|
215 |
+
// empty, and subsequent appends to the sink will create a new buffer. The
|
216 |
+
// destructor will free the internal buffer if GetBuffer() was not called.
|
217 |
+
//
|
218 |
+
// Example:
|
219 |
+
//
|
220 |
+
// GrowingArrayByteSink sink(10);
|
221 |
+
// sink.Append("hi", 2);
|
222 |
+
// sink.Append(data, n);
|
223 |
+
// const char* buf = sink.GetBuffer(); // Ownership transferred
|
224 |
+
// delete[] buf;
|
225 |
+
//
|
226 |
+
// ByteSink that owns a growable internal char buffer. GetBuffer() transfers
// ownership to the caller (who must delete[] it) and resets the sink; the
// destructor frees the buffer if GetBuffer() was never called.
class LIBPROTOBUF_EXPORT GrowingArrayByteSink : public strings::ByteSink {
 public:
  explicit GrowingArrayByteSink(size_t estimated_size);
  virtual ~GrowingArrayByteSink();
  virtual void Append(const char* bytes, size_t n);

  // Returns the allocated buffer, and sets nbytes to its size. The caller takes
  // ownership of the buffer and must delete it with delete[].
  char* GetBuffer(size_t* nbytes);

 private:
  void Expand(size_t amount);  // Grows capacity_ by at least "amount".
  void ShrinkToFit();          // Trims capacity_ down toward size_.

  size_t capacity_;  // Allocated size of buf_.
  char* buf_;        // Owned; released to caller by GetBuffer().
  size_t size_;      // Bytes appended so far.
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GrowingArrayByteSink);
};
|
245 |
+
|
246 |
+
// Implementation of ByteSink that appends to the given string.
|
247 |
+
// Existing contents of "dest" are not modified; new data is appended.
|
248 |
+
//
|
249 |
+
// Example:
|
250 |
+
//
|
251 |
+
// string dest = "Hello ";
|
252 |
+
// StringByteSink sink(&dest);
|
253 |
+
// sink.Append("World", 5);
|
254 |
+
// assert(dest == "Hello World");
|
255 |
+
//
|
256 |
+
// ByteSink that appends to a caller-owned string. Existing contents of
// "dest" are not modified; new data is appended.
class LIBPROTOBUF_EXPORT StringByteSink : public ByteSink {
 public:
  explicit StringByteSink(string* dest) : dest_(dest) {}
  virtual void Append(const char* data, size_t n);

 private:
  string* dest_;  // Not owned; must outlive this sink.
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringByteSink);
};
|
265 |
+
|
266 |
+
// Implementation of ByteSink that discards all data.
|
267 |
+
//
|
268 |
+
// Example:
|
269 |
+
//
|
270 |
+
// NullByteSink sink;
|
271 |
+
// sink.Append(data, data.size()); // All data ignored.
|
272 |
+
//
|
273 |
+
// ByteSink that discards all data appended to it.
class LIBPROTOBUF_EXPORT NullByteSink : public ByteSink {
 public:
  NullByteSink() {}
  // Intentionally ignores its arguments.
  virtual void Append(const char *data, size_t n) {}

 private:
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(NullByteSink);
};
|
281 |
+
|
282 |
+
//
|
283 |
+
// Some commonly used implementations of ByteSource
|
284 |
+
//
|
285 |
+
|
286 |
+
// Implementation of ByteSource that reads from a StringPiece.
|
287 |
+
//
|
288 |
+
// Example:
|
289 |
+
//
|
290 |
+
// string data = "Hello";
|
291 |
+
// ArrayByteSource source(data);
|
292 |
+
// assert(source.Available() == 5);
|
293 |
+
// assert(source.Peek() == "Hello");
|
294 |
+
//
|
295 |
+
// ByteSource that reads from a StringPiece. The bytes viewed by the
// StringPiece are not copied and must outlive this source.
class LIBPROTOBUF_EXPORT ArrayByteSource : public ByteSource {
 public:
  explicit ArrayByteSource(StringPiece s) : input_(s) {}

  virtual size_t Available() const;
  virtual StringPiece Peek();
  virtual void Skip(size_t n);

 private:
  StringPiece input_;  // Remaining unread bytes (view only, not owned).
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayByteSource);
};
|
307 |
+
|
308 |
+
// Implementation of ByteSource that wraps another ByteSource, limiting the
|
309 |
+
// number of bytes returned.
|
310 |
+
//
|
311 |
+
// The caller maintains ownership of the underlying source, and may not use the
|
312 |
+
// underlying source while using the LimitByteSource object. The underlying
|
313 |
+
// source's pointer is advanced by n bytes every time this LimitByteSource
|
314 |
+
// object is advanced by n.
|
315 |
+
//
|
316 |
+
// Example:
|
317 |
+
//
|
318 |
+
// string data = "Hello World";
|
319 |
+
// ArrayByteSource abs(data);
|
320 |
+
// assert(abs.Available() == data.size());
|
321 |
+
//
|
322 |
+
// LimitByteSource limit(abs, 5);
|
323 |
+
// assert(limit.Available() == 5);
|
324 |
+
// assert(limit.Peek() == "Hello");
|
325 |
+
//
|
326 |
+
// ByteSource that wraps another ByteSource, limiting the number of bytes
// returned. Advancing this source advances the underlying source by the
// same amount; the caller keeps ownership of the underlying source but must
// not use it while this wrapper is in use (see header comment above).
class LIBPROTOBUF_EXPORT LimitByteSource : public ByteSource {
 public:
  // Returns at most "limit" bytes from "source".
  LimitByteSource(ByteSource* source, size_t limit);

  virtual size_t Available() const;
  virtual StringPiece Peek();
  virtual void Skip(size_t n);

  // We override CopyTo so that we can forward to the underlying source, in
  // case it has an efficient implementation of CopyTo.
  virtual void CopyTo(ByteSink* sink, size_t n);

 private:
  ByteSource* source_;  // Not owned.
  size_t limit_;        // Bytes remaining before the limit is reached.
};
|
343 |
+
|
344 |
+
} // namespace strings
|
345 |
+
} // namespace protobuf
|
346 |
+
} // namespace google
|
347 |
+
|
348 |
+
#endif // GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/casts.h
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2014 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_CASTS_H__
|
32 |
+
#define GOOGLE_PROTOBUF_CASTS_H__
|
33 |
+
|
34 |
+
#include <type_traits>
|
35 |
+
|
36 |
+
#include <google/protobuf/stubs/common.h>
|
37 |
+
|
38 |
+
namespace google {
|
39 |
+
namespace protobuf {
|
40 |
+
namespace internal {
|
41 |
+
// Use implicit_cast as a safe version of static_cast or const_cast
|
42 |
+
// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
|
43 |
+
// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
|
44 |
+
// a const pointer to Foo).
|
45 |
+
// When you use implicit_cast, the compiler checks that the cast is safe.
|
46 |
+
// Such explicit implicit_casts are necessary in surprisingly many
|
47 |
+
// situations where C++ demands an exact type match instead of an
|
48 |
+
// argument type convertible to a target type.
|
49 |
+
//
|
50 |
+
// The From type can be inferred, so the preferred syntax for using
|
51 |
+
// implicit_cast is the same as for static_cast etc.:
|
52 |
+
//
|
53 |
+
// implicit_cast<ToType>(expr)
|
54 |
+
//
|
55 |
+
// implicit_cast would have been part of the C++ standard library,
|
56 |
+
// but the proposal was submitted too late. It will probably make
|
57 |
+
// its way into the language in the future.
|
58 |
+
// Converts "f" to type To using only an implicit conversion (e.g. an upcast
// or adding const). Unlike static_cast, the compiler rejects the call
// entirely if no implicit conversion from From to To exists, which is what
// makes it safe. See the rationale in the comment block above.
template<typename To, typename From>
inline To implicit_cast(From const &f) {
  return f;
}
|
62 |
+
|
63 |
+
// When you upcast (that is, cast a pointer from type Foo to type
|
64 |
+
// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts
|
65 |
+
// always succeed. When you downcast (that is, cast a pointer from
|
66 |
+
// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
|
67 |
+
// how do you know the pointer is really of type SubclassOfFoo? It
|
68 |
+
// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
|
69 |
+
// when you downcast, you should use this macro. In debug mode, we
|
70 |
+
// use dynamic_cast<> to double-check the downcast is legal (we die
|
71 |
+
// if it's not). In normal mode, we do the efficient static_cast<>
|
72 |
+
// instead. Thus, it's important to test in debug mode to make sure
|
73 |
+
// the cast is legal!
|
74 |
+
// This is the only place in the code we should use dynamic_cast<>.
|
75 |
+
// In particular, you SHOULDN'T be using dynamic_cast<> in order to
|
76 |
+
// do RTTI (eg code like this:
|
77 |
+
// if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
|
78 |
+
// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
|
79 |
+
// You should design the code some other way not to need this.
|
80 |
+
|
81 |
+
// Checked downcast for pointers: a plain static_cast in optimized builds,
// verified with dynamic_cast (assert-fails on mismatch) in debug builds
// with RTTI enabled. NULL passes through unchecked.
template<typename To, typename From>  // use like this: down_cast<T*>(foo);
inline To down_cast(From* f) {  // so we only accept pointers
  // Ensures that To is a sub-type of From *. This test is here only
  // for compile-time type checking, and has no overhead in an
  // optimized build at run-time, as it will be optimized away
  // completely.
  if (false) {
    implicit_cast<From*, To>(0);
  }

#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // RTTI: debug mode only!
#endif
  return static_cast<To>(f);
}
|
96 |
+
|
97 |
+
// Checked downcast for references: like the pointer overload above, but the
// debug-mode dynamic_cast is performed on the object's address.
template<typename To, typename From>  // use like this: down_cast<T&>(foo);
inline To down_cast(From& f) {
  // To is expected to be a reference type; strip the reference to form the
  // pointer type used for the checks below.
  typedef typename std::remove_reference<To>::type* ToAsPointer;
  // Ensures that To is a sub-type of From *. This test is here only
  // for compile-time type checking, and has no overhead in an
  // optimized build at run-time, as it will be optimized away
  // completely.
  if (false) {
    implicit_cast<From*, ToAsPointer>(0);
  }

#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)
  // RTTI: debug mode only!
  assert(dynamic_cast<ToAsPointer>(&f) != NULL);
#endif
  return *static_cast<ToAsPointer>(&f);
}
|
114 |
+
|
115 |
+
// bit_cast<To>(from) reinterprets the bits of "from" as a value of type To,
// like C++20's std::bit_cast. It is implemented with memcpy, which (unlike
// the common *reinterpret_cast<To*>(&from) idiom) does not violate strict
// aliasing, and which compilers optimize to a simple register move.
//
// Requires sizeof(To) == sizeof(From) and both types trivially copyable;
// these preconditions are enforced at compile time below. The project
// macro GOOGLE_COMPILE_ASSERT is replaced by standard static_assert
// (C++11, consistent with this header's use of <type_traits>).
template<typename To, typename From>
inline To bit_cast(const From& from) {
  static_assert(sizeof(From) == sizeof(To),
                "bit_cast requires source and destination of the same size");
  static_assert(std::is_trivially_copyable<From>::value,
                "bit_cast requires the source type to be trivially copyable");
  static_assert(std::is_trivially_copyable<To>::value,
                "bit_cast requires the destination type to be trivially "
                "copyable");
  To dest;
  memcpy(&dest, &from, sizeof(dest));
  return dest;
}
|
123 |
+
|
124 |
+
} // namespace internal
|
125 |
+
|
126 |
+
// We made these internal so that they would show up as such in the docs,
|
127 |
+
// but we don't want to stick "internal::" in front of them everywhere.
|
128 |
+
using internal::implicit_cast;
|
129 |
+
using internal::down_cast;
|
130 |
+
using internal::bit_cast;
|
131 |
+
|
132 |
+
} // namespace protobuf
|
133 |
+
} // namespace google
|
134 |
+
#endif // GOOGLE_PROTOBUF_CASTS_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/common.h
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda) and others
|
32 |
+
//
|
33 |
+
// Contains basic types and utilities used by the rest of the library.
|
34 |
+
|
35 |
+
#ifndef GOOGLE_PROTOBUF_COMMON_H__
|
36 |
+
#define GOOGLE_PROTOBUF_COMMON_H__
|
37 |
+
|
38 |
+
#include <algorithm>
|
39 |
+
#include <iostream>
|
40 |
+
#include <map>
|
41 |
+
#include <memory>
|
42 |
+
#include <set>
|
43 |
+
#include <string>
|
44 |
+
#include <vector>
|
45 |
+
|
46 |
+
#include <google/protobuf/stubs/port.h>
|
47 |
+
#include <google/protobuf/stubs/macros.h>
|
48 |
+
#include <google/protobuf/stubs/platform_macros.h>
|
49 |
+
|
50 |
+
// TODO(liujisi): Remove the following includes after the include clean-up.
|
51 |
+
#include <google/protobuf/stubs/logging.h>
|
52 |
+
#include <google/protobuf/stubs/mutex.h>
|
53 |
+
#include <google/protobuf/stubs/callback.h>
|
54 |
+
|
55 |
+
#ifndef PROTOBUF_USE_EXCEPTIONS
|
56 |
+
#if defined(_MSC_VER) && defined(_CPPUNWIND)
|
57 |
+
#define PROTOBUF_USE_EXCEPTIONS 1
|
58 |
+
#elif defined(__EXCEPTIONS)
|
59 |
+
#define PROTOBUF_USE_EXCEPTIONS 1
|
60 |
+
#else
|
61 |
+
#define PROTOBUF_USE_EXCEPTIONS 0
|
62 |
+
#endif
|
63 |
+
#endif
|
64 |
+
|
65 |
+
#if PROTOBUF_USE_EXCEPTIONS
|
66 |
+
#include <exception>
|
67 |
+
#endif
|
68 |
+
#if defined(__APPLE__)
|
69 |
+
#include <TargetConditionals.h> // for TARGET_OS_IPHONE
|
70 |
+
#endif
|
71 |
+
|
72 |
+
#if defined(__ANDROID__) || defined(GOOGLE_PROTOBUF_OS_ANDROID) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(GOOGLE_PROTOBUF_OS_IPHONE)
|
73 |
+
#include <pthread.h>
|
74 |
+
#endif
|
75 |
+
|
76 |
+
#if defined(_WIN32) && defined(GetMessage)
|
77 |
+
// Allow GetMessage to be used as a valid method name in protobuf classes.
// windows.h defines GetMessage() as a macro. Let's re-define it as an inline
// function. The inline function should be equivalent for C++ users.
inline BOOL GetMessage_Win32(
    LPMSG lpMsg, HWND hWnd,
    UINT wMsgFilterMin, UINT wMsgFilterMax) {
  // The macro is still defined here, so this call expands to the real Win32
  // GetMessageA/GetMessageW entry point.
  return GetMessage(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);
}
#undef GetMessage
// With the macro removed, provide an ordinary function of the same name so
// existing callers of ::GetMessage() keep compiling and behaving the same.
inline BOOL GetMessage(
    LPMSG lpMsg, HWND hWnd,
    UINT wMsgFilterMin, UINT wMsgFilterMax) {
  return GetMessage_Win32(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);
}
|
91 |
+
#endif
|
92 |
+
|
93 |
+
namespace std {}
|
94 |
+
|
95 |
+
namespace google {
|
96 |
+
namespace protobuf {
|
97 |
+
namespace internal {
|
98 |
+
|
99 |
+
// Some of these constants are macros rather than const ints so that they can
|
100 |
+
// be used in #if directives.
|
101 |
+
|
102 |
+
// The current version, represented as a single integer to make comparison
|
103 |
+
// easier: major * 10^6 + minor * 10^3 + micro
|
104 |
+
#define GOOGLE_PROTOBUF_VERSION 3006001
|
105 |
+
|
106 |
+
// A suffix string for alpha, beta or rc releases. Empty for stable releases.
|
107 |
+
#define GOOGLE_PROTOBUF_VERSION_SUFFIX ""
|
108 |
+
|
109 |
+
// The minimum library version which works with the current version of the
|
110 |
+
// headers.
|
111 |
+
#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION 3006001
|
112 |
+
|
113 |
+
// The minimum header version which works with the current version of
|
114 |
+
// the library. This constant should only be used by protoc's C++ code
|
115 |
+
// generator.
|
116 |
+
static const int kMinHeaderVersionForLibrary = 3006001;
|
117 |
+
|
118 |
+
// The minimum protoc version which works with the current version of the
|
119 |
+
// headers.
|
120 |
+
#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION 3006001
|
121 |
+
|
122 |
+
// The minimum header version which works with the current version of
|
123 |
+
// protoc. This constant should only be used in VerifyVersion().
|
124 |
+
static const int kMinHeaderVersionForProtoc = 3006001;
|
125 |
+
|
126 |
+
// Verifies that the headers and libraries are compatible. Use the macro
|
127 |
+
// below to call this.
|
128 |
+
void LIBPROTOBUF_EXPORT VerifyVersion(int headerVersion, int minLibraryVersion,
|
129 |
+
const char* filename);
|
130 |
+
|
131 |
+
// Converts a numeric version number to a string.
|
132 |
+
std::string LIBPROTOBUF_EXPORT VersionString(int version);
|
133 |
+
|
134 |
+
} // namespace internal
|
135 |
+
|
136 |
+
// Place this macro in your main() function (or somewhere before you attempt
|
137 |
+
// to use the protobuf library) to verify that the version you link against
|
138 |
+
// matches the headers you compiled against. If a version mismatch is
|
139 |
+
// detected, the process will abort.
|
140 |
+
#define GOOGLE_PROTOBUF_VERIFY_VERSION \
|
141 |
+
::google::protobuf::internal::VerifyVersion( \
|
142 |
+
GOOGLE_PROTOBUF_VERSION, GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION, \
|
143 |
+
__FILE__)
|
144 |
+
|
145 |
+
|
146 |
+
// ===================================================================
|
147 |
+
// from google3/util/utf8/public/unilib.h
|
148 |
+
|
149 |
+
class StringPiece;
|
150 |
+
namespace internal {
|
151 |
+
|
152 |
+
// Checks if the buffer contains structurally-valid UTF-8. Implemented in
|
153 |
+
// structurally_valid.cc.
|
154 |
+
LIBPROTOBUF_EXPORT bool IsStructurallyValidUTF8(const char* buf, int len);
|
155 |
+
|
156 |
+
// Convenience overload: checks an entire std::string for structurally valid
// UTF-8.  The length is narrowed to int to match the pointer/length overload
// declared above.
inline bool IsStructurallyValidUTF8(const std::string& str) {
  return IsStructurallyValidUTF8(str.data(), static_cast<int>(str.length()));
}
|
159 |
+
|
160 |
+
// Returns initial number of bytes of structually valid UTF-8.
|
161 |
+
LIBPROTOBUF_EXPORT int UTF8SpnStructurallyValid(const StringPiece& str);
|
162 |
+
|
163 |
+
// Coerce UTF-8 byte string in src_str to be
|
164 |
+
// a structurally-valid equal-length string by selectively
|
165 |
+
// overwriting illegal bytes with replace_char (typically ' ' or '?').
|
166 |
+
// replace_char must be legal printable 7-bit Ascii 0x20..0x7e.
|
167 |
+
// src_str is read-only.
|
168 |
+
//
|
169 |
+
// Returns pointer to output buffer, src_str.data() if no changes were made,
|
170 |
+
// or idst if some bytes were changed. idst is allocated by the caller
|
171 |
+
// and must be at least as big as src_str
|
172 |
+
//
|
173 |
+
// Optimized for: all structurally valid and no byte copying is done.
|
174 |
+
//
|
175 |
+
LIBPROTOBUF_EXPORT char* UTF8CoerceToStructurallyValid(
|
176 |
+
const StringPiece& str, char* dst, char replace_char);
|
177 |
+
|
178 |
+
} // namespace internal
|
179 |
+
|
180 |
+
|
181 |
+
// ===================================================================
|
182 |
+
// Shutdown support.
|
183 |
+
|
184 |
+
// Shut down the entire protocol buffers library, deleting all static-duration
|
185 |
+
// objects allocated by the library or by generated .pb.cc files.
|
186 |
+
//
|
187 |
+
// There are two reasons you might want to call this:
|
188 |
+
// * You use a draconian definition of "memory leak" in which you expect
|
189 |
+
// every single malloc() to have a corresponding free(), even for objects
|
190 |
+
// which live until program exit.
|
191 |
+
// * You are writing a dynamically-loaded library which needs to clean up
|
192 |
+
// after itself when the library is unloaded.
|
193 |
+
//
|
194 |
+
// It is safe to call this multiple times. However, it is not safe to use
|
195 |
+
// any other part of the protocol buffers library after
|
196 |
+
// ShutdownProtobufLibrary() has been called. Furthermore this call is not
|
197 |
+
// thread safe, user needs to synchronize multiple calls.
|
198 |
+
LIBPROTOBUF_EXPORT void ShutdownProtobufLibrary();
|
199 |
+
|
200 |
+
namespace internal {
|
201 |
+
|
202 |
+
// Register a function to be called when ShutdownProtocolBuffers() is called.
|
203 |
+
LIBPROTOBUF_EXPORT void OnShutdown(void (*func)());
|
204 |
+
// Run an arbitrary function on an arg
|
205 |
+
LIBPROTOBUF_EXPORT void OnShutdownRun(void (*f)(const void*), const void* arg);
|
206 |
+
|
207 |
+
// Registers `p` to be deleted when the library is shut down (see OnShutdownRun
// above), then returns `p` unchanged so the call can wrap an allocation:
//   static T* obj = OnShutdownDelete(new T);
template <typename T>
T* OnShutdownDelete(T* p) {
  // The lambda recovers the static type T so the correct destructor runs.
  OnShutdownRun([](const void* p) { delete static_cast<const T*>(p); }, p);
  return p;
}
|
212 |
+
|
213 |
+
} // namespace internal
|
214 |
+
|
215 |
+
#if PROTOBUF_USE_EXCEPTIONS
|
216 |
+
// Exception type available when the library is built with exception support
// (PROTOBUF_USE_EXCEPTIONS).  Carries the source location and message of a
// fatal error.  NOTE(review): the throw site is not visible in this header --
// presumably the fatal-logging path; confirm against the logging
// implementation.
class FatalException : public std::exception {
 public:
  // `filename` must outlive the exception: only the pointer is stored
  // (callers pass __FILE__, which is a string literal).
  FatalException(const char* filename, int line, const std::string& message)
      : filename_(filename), line_(line), message_(message) {}
  // Out-of-line destructor/what() anchor the vtable in the library.
  virtual ~FatalException() throw();

  virtual const char* what() const throw();

  const char* filename() const { return filename_; }
  int line() const { return line_; }
  const std::string& message() const { return message_; }

 private:
  const char* filename_;
  const int line_;
  const std::string message_;
};
|
233 |
+
#endif
|
234 |
+
|
235 |
+
// This is at the end of the file instead of the beginning to work around a bug
|
236 |
+
// in some versions of MSVC.
|
237 |
+
using std::string;
|
238 |
+
|
239 |
+
} // namespace protobuf
|
240 |
+
} // namespace google
|
241 |
+
|
242 |
+
#endif // GOOGLE_PROTOBUF_COMMON_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/fastmem.h
ADDED
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2014 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Fast memory copying and comparison routines.
|
32 |
+
// strings::fastmemcmp_inlined() replaces memcmp()
|
33 |
+
// strings::memcpy_inlined() replaces memcpy()
|
34 |
+
// strings::memeq(a, b, n) replaces memcmp(a, b, n) == 0
|
35 |
+
//
|
36 |
+
// strings::*_inlined() routines are inline versions of the
|
37 |
+
// routines exported by this module. Sometimes using the inlined
|
38 |
+
// versions is faster. Measure before using the inlined versions.
|
39 |
+
//
|
40 |
+
// Performance measurement:
|
41 |
+
// strings::fastmemcmp_inlined
|
42 |
+
// Analysis: memcmp, fastmemcmp_inlined, fastmemcmp
|
43 |
+
// 2012-01-30
|
44 |
+
|
45 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
|
46 |
+
#define GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
|
47 |
+
|
48 |
+
#include <stddef.h>
|
49 |
+
#include <stdio.h>
|
50 |
+
#include <string.h>
|
51 |
+
|
52 |
+
#include <google/protobuf/stubs/common.h>
|
53 |
+
|
54 |
+
namespace google {
|
55 |
+
namespace protobuf {
|
56 |
+
namespace internal {
|
57 |
+
|
58 |
+
// Return true if the n bytes at a equal the n bytes at b.
// The regions are allowed to overlap.
//
// The performance is similar to the performance of memcmp(), but faster for
// moderately-sized inputs, or inputs that share a common prefix and differ
// somewhere in their last 8 bytes. Further optimizations can be added later
// if it makes sense to do so.
inline bool memeq(const char* a, const char* b, size_t n) {
  size_t n_rounded_down = n & ~static_cast<size_t>(7);
  if (GOOGLE_PREDICT_FALSE(n_rounded_down == 0)) {  // n <= 7
    return memcmp(a, b, n) == 0;
  }
  // n >= 8
  uint64 u = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
  uint64 v = GOOGLE_UNALIGNED_LOAD64(a + n - 8) ^ GOOGLE_UNALIGNED_LOAD64(b + n - 8);
  if ((u | v) != 0) {  // The first or last 8 bytes differ.
    return false;
  }
  // First and last words match; compare the interior in aligned-to-8 chunks.
  // The final (possibly partial) word was already covered by the v check.
  a += 8;
  b += 8;
  n = n_rounded_down - 8;
  if (n > 128) {
    // As of 2012, memcmp on x86-64 uses a big unrolled loop with SSE2
    // instructions, and while we could try to do something faster, it
    // doesn't seem worth pursuing.
    return memcmp(a, b, n) == 0;
  }
  for (; n >= 16; n -= 16) {
    uint64 x = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);
    uint64 y = GOOGLE_UNALIGNED_LOAD64(a + 8) ^ GOOGLE_UNALIGNED_LOAD64(b + 8);
    if ((x | y) != 0) {
      return false;
    }
    a += 16;
    b += 16;
  }
  // n must be 0 or 8 now because it was a multiple of 8 at the top of the loop.
  return n == 0 || GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b);
}
|
97 |
+
|
98 |
+
// Three-way compare of the n bytes at a and b, in the spirit of memcmp():
// returns 0 if equal, negative if a sorts first, positive otherwise.
// Optimized for short inputs; inputs of 64 bytes or more are handed straight
// to the library memcmp().
//
// NOTE(review): the final byte-wise loop widens each char through uint32, so
// on platforms where char is signed, the sign of the result for bytes >= 0x80
// can differ from memcmp()'s unsigned-byte ordering.  Callers should rely
// only on the zero/non-zero distinction being consistent -- confirm.
inline int fastmemcmp_inlined(const char *a, const char *b, size_t n) {
  if (n >= 64) {
    // Large inputs: the library memcmp is already heavily optimized.
    return memcmp(a, b, n);
  }
  const char* a_limit = a + n;
  // Skip over leading equal 8-byte chunks.
  while (a + sizeof(uint64) <= a_limit &&
         GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b)) {
    a += sizeof(uint64);
    b += sizeof(uint64);
  }
  // Skip one equal 4-byte chunk, if present.
  if (a + sizeof(uint32) <= a_limit &&
      GOOGLE_UNALIGNED_LOAD32(a) == GOOGLE_UNALIGNED_LOAD32(b)) {
    a += sizeof(uint32);
    b += sizeof(uint32);
  }
  // A difference (if any) now lies in the remaining bytes; compare byte-wise.
  while (a < a_limit) {
    int d =
        static_cast<int>(static_cast<uint32>(*a++) - static_cast<uint32>(*b++));
    if (d) return d;
  }
  return 0;
}
|
120 |
+
|
121 |
+
// Copies `size` bytes from `src` to `dst`.
// memcpy() with a runtime length is comparatively slow for small, variable
// sizes; dispatching through a switch hands the compiler a compile-time
// constant length for sizes 1..16, which it lowers to a few direct loads and
// stores.  Use only in performance-critical spots where small sizes are
// frequent -- elsewhere it is just code bloat.
inline void memcpy_inlined(char *dst, const char *src, size_t size) {
  // Every case passes a literal length so the memcpy call can be fully
  // inlined; the default case covers size == 0 and size > 16.
  switch (size) {
    case 1:  memcpy(dst, src, 1);  return;
    case 2:  memcpy(dst, src, 2);  return;
    case 3:  memcpy(dst, src, 3);  return;
    case 4:  memcpy(dst, src, 4);  return;
    case 5:  memcpy(dst, src, 5);  return;
    case 6:  memcpy(dst, src, 6);  return;
    case 7:  memcpy(dst, src, 7);  return;
    case 8:  memcpy(dst, src, 8);  return;
    case 9:  memcpy(dst, src, 9);  return;
    case 10: memcpy(dst, src, 10); return;
    case 11: memcpy(dst, src, 11); return;
    case 12: memcpy(dst, src, 12); return;
    case 13: memcpy(dst, src, 13); return;
    case 14: memcpy(dst, src, 14); return;
    case 15: memcpy(dst, src, 15); return;
    case 16: memcpy(dst, src, 16); return;
    default: memcpy(dst, src, size); return;
  }
}
|
148 |
+
|
149 |
+
} // namespace internal
|
150 |
+
} // namespace protobuf
|
151 |
+
} // namespace google
|
152 |
+
|
153 |
+
#endif // GOOGLE_PROTOBUF_STUBS_FASTMEM_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/hash.h
ADDED
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
//
|
33 |
+
// Deals with the fact that hash_map is not defined everywhere.
|
34 |
+
|
35 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_HASH_H__
|
36 |
+
#define GOOGLE_PROTOBUF_STUBS_HASH_H__
|
37 |
+
|
38 |
+
#include <string.h>
|
39 |
+
#include <google/protobuf/stubs/common.h>
|
40 |
+
|
41 |
+
#define GOOGLE_PROTOBUF_HAVE_HASH_MAP 1
|
42 |
+
#define GOOGLE_PROTOBUF_HAVE_HASH_SET 1
|
43 |
+
|
44 |
+
// Use C++11 unordered_{map|set} if available.
|
45 |
+
#if ((defined(_LIBCPP_STD_VER) && _LIBCPP_STD_VER >= 11) || \
|
46 |
+
(((__cplusplus >= 201103L) || defined(__GXX_EXPERIMENTAL_CXX0X)) && \
|
47 |
+
(__GLIBCXX__ > 20090421)))
|
48 |
+
# define GOOGLE_PROTOBUF_HAS_CXX11_HASH
|
49 |
+
|
50 |
+
// For XCode >= 4.6: the compiler is clang with libc++.
|
51 |
+
// For earlier XCode version: the compiler is gcc-4.2.1 with libstdc++.
|
52 |
+
// libc++ provides <unordered_map> and friends even in non C++11 mode,
|
53 |
+
// and it does not provide the tr1 library. Therefore the following macro
|
54 |
+
// checks against this special case.
|
55 |
+
// Note that we should not test the __APPLE_CC__ version number or the
|
56 |
+
// __clang__ macro, since the new compiler can still use -stdlib=libstdc++, in
|
57 |
+
// which case <unordered_map> is not compilable without -std=c++11
|
58 |
+
#elif defined(__APPLE_CC__)
|
59 |
+
# if __GNUC__ >= 4
|
60 |
+
# define GOOGLE_PROTOBUF_HAS_TR1
|
61 |
+
# else
|
62 |
+
// Not tested for gcc < 4... These setting can compile under 4.2.1 though.
|
63 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx
|
64 |
+
# include <ext/hash_map>
|
65 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
66 |
+
# include <ext/hash_set>
|
67 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
68 |
+
# endif
|
69 |
+
|
70 |
+
// Version checks for gcc.
|
71 |
+
#elif defined(__GNUC__)
|
72 |
+
// For GCC 4.x+, use tr1::unordered_map/set; otherwise, follow the
|
73 |
+
// instructions from:
|
74 |
+
// https://gcc.gnu.org/onlinedocs/libstdc++/manual/backwards.html
|
75 |
+
# if __GNUC__ >= 4
|
76 |
+
# define GOOGLE_PROTOBUF_HAS_TR1
|
77 |
+
# elif __GNUC__ >= 3
|
78 |
+
# include <backward/hash_map>
|
79 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
80 |
+
# include <backward/hash_set>
|
81 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
82 |
+
# if __GNUC__ == 3 && __GNUC_MINOR__ == 0
|
83 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE std // GCC 3.0
|
84 |
+
# else
|
85 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx // GCC 3.1 and later
|
86 |
+
# endif
|
87 |
+
# else
|
88 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE
|
89 |
+
# include <hash_map>
|
90 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
91 |
+
# include <hash_set>
|
92 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
93 |
+
# endif
|
94 |
+
|
95 |
+
// GCC <= 4.1 does not define std::tr1::hash for `long long int` or `long long unsigned int`
|
96 |
+
# if __GNUC__ == 4 && defined(__GNUC_MINOR__) && __GNUC_MINOR__ <= 1
|
97 |
+
# undef GOOGLE_PROTOBUF_HAS_TR1
|
98 |
+
# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP
|
99 |
+
# undef GOOGLE_PROTOBUF_HAVE_HASH_SET
|
100 |
+
# endif
|
101 |
+
|
102 |
+
// Version checks for MSC.
|
103 |
+
// Apparently Microsoft decided to move hash_map *back* to the std namespace in
|
104 |
+
// MSVC 2010:
|
105 |
+
// http://blogs.msdn.com/vcblog/archive/2009/05/25/stl-breaking-changes-in-visual-studio-2010-beta-1.aspx
|
106 |
+
// And.. they are moved back to stdext in MSVC 2013 (haven't checked 2012). That
|
107 |
+
// said, use unordered_map for MSVC 2010 and beyond is our safest bet.
|
108 |
+
#elif defined(_MSC_VER)
|
109 |
+
# if _MSC_VER >= 1600 // Since Visual Studio 2010
|
110 |
+
# define GOOGLE_PROTOBUF_HAS_CXX11_HASH
|
111 |
+
# define GOOGLE_PROTOBUF_HASH_COMPARE std::hash_compare
|
112 |
+
# elif _MSC_VER >= 1500 // Since Visual Studio 2008
|
113 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext
|
114 |
+
# include <hash_map>
|
115 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
116 |
+
# include <hash_set>
|
117 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
118 |
+
# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare
|
119 |
+
# define GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE
|
120 |
+
# elif _MSC_VER >= 1310
|
121 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext
|
122 |
+
# include <hash_map>
|
123 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
124 |
+
# include <hash_set>
|
125 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
126 |
+
# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare
|
127 |
+
# else
|
128 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE std
|
129 |
+
# include <hash_map>
|
130 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map
|
131 |
+
# include <hash_set>
|
132 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set
|
133 |
+
# define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare
|
134 |
+
# endif
|
135 |
+
|
136 |
+
// **ADD NEW COMPILERS SUPPORT HERE.**
|
137 |
+
// For other compilers, undefine the macro and fallback to use std::map, in
|
138 |
+
// google/protobuf/stubs/hash.h
|
139 |
+
#else
|
140 |
+
# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP
|
141 |
+
# undef GOOGLE_PROTOBUF_HAVE_HASH_SET
|
142 |
+
#endif
|
143 |
+
|
144 |
+
#if defined(GOOGLE_PROTOBUF_HAS_CXX11_HASH)
|
145 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE std
|
146 |
+
# include <unordered_map>
|
147 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map
|
148 |
+
# include <unordered_set>
|
149 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set
|
150 |
+
#elif defined(GOOGLE_PROTOBUF_HAS_TR1)
|
151 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE std::tr1
|
152 |
+
# include <tr1/unordered_map>
|
153 |
+
# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map
|
154 |
+
# include <tr1/unordered_set>
|
155 |
+
# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set
|
156 |
+
#endif
|
157 |
+
|
158 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START \
|
159 |
+
namespace google { \
|
160 |
+
namespace protobuf {
|
161 |
+
# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END }}
|
162 |
+
|
163 |
+
#undef GOOGLE_PROTOBUF_HAS_CXX11_HASH
|
164 |
+
#undef GOOGLE_PROTOBUF_HAS_TR1
|
165 |
+
|
166 |
+
#if defined(GOOGLE_PROTOBUF_HAVE_HASH_MAP) && \
|
167 |
+
defined(GOOGLE_PROTOBUF_HAVE_HASH_SET)
|
168 |
+
#else
|
169 |
+
#define GOOGLE_PROTOBUF_MISSING_HASH
|
170 |
+
#include <map>
|
171 |
+
#include <set>
|
172 |
+
#endif
|
173 |
+
|
174 |
+
namespace google {
|
175 |
+
namespace protobuf {
|
176 |
+
|
177 |
+
#ifdef GOOGLE_PROTOBUF_MISSING_HASH
|
178 |
+
#undef GOOGLE_PROTOBUF_MISSING_HASH
|
179 |
+
|
180 |
+
// This system doesn't have hash_map or hash_set. Emulate them using map and
|
181 |
+
// set.
|
182 |
+
|
183 |
+
// Make hash<T> be the same as less<T>.  Note that everywhere where custom
// hash functions are defined in the protobuf code, they are also defined such
// that they can be used as "less" functions, which is required by MSVC anyway.
template <typename Key>
struct hash {
  // Dummy, just to make derivative hash functions compile.  Never invoked:
  // on this configuration the containers below are ordered, not hashed.
  int operator()(const Key& key) {
    GOOGLE_LOG(FATAL) << "Should never be called.";
    return 0;
  }

  // Ordering predicate actually used by the std::map/std::set emulation.
  inline bool operator()(const Key& a, const Key& b) const {
    return a < b;
  }
};
|
198 |
+
|
199 |
+
// Make sure char* is compared by value (string contents), not by pointer.
template <>
struct hash<const char*> {
  // Dummy, just to make derivative hash functions compile.  Never invoked.
  int operator()(const char* key) {
    GOOGLE_LOG(FATAL) << "Should never be called.";
    return 0;
  }

  // Lexicographic ordering via strcmp, used by the map/set emulation below.
  inline bool operator()(const char* a, const char* b) const {
    return strcmp(a, b) < 0;
  }
};
|
212 |
+
|
213 |
+
// Drop-in hash_map emulation backed by std::map.  HashFcn doubles as the
// ordering comparator (see hash<> above), so lookups are O(log n), not O(1).
template <typename Key, typename Data,
          typename HashFcn = hash<Key>,
          typename EqualKey = std::equal_to<Key>,
          typename Alloc = std::allocator< std::pair<const Key, Data> > >
class hash_map : public std::map<Key, Data, HashFcn, Alloc> {
  typedef std::map<Key, Data, HashFcn, Alloc> BaseClass;

 public:
  // Mirrors the hash_map constructor signature; the bucket-count hint `a`
  // and EqualKey `c` have no std::map counterpart and are ignored.
  hash_map(int a = 0, const HashFcn& b = HashFcn(),
           const EqualKey& c = EqualKey(),
           const Alloc& d = Alloc()) : BaseClass(b, d) {}

  HashFcn hash_function() const { return HashFcn(); }
};
|
227 |
+
|
228 |
+
// Drop-in hash_set emulation backed by std::set; HashFcn doubles as the
// ordering comparator.  EqualKey is accepted for interface parity but unused.
template <typename Key,
          typename HashFcn = hash<Key>,
          typename EqualKey = std::equal_to<Key> >
class hash_set : public std::set<Key, HashFcn> {
 public:
  // Bucket-count hint is ignored; std::set has no equivalent.
  hash_set(int = 0) {}

  HashFcn hash_function() const { return HashFcn(); }
};
|
237 |
+
|
238 |
+
#elif defined(_MSC_VER) && !defined(_STLPORT_VERSION) && \
|
239 |
+
!(defined(_LIBCPP_STD_VER) && _LIBCPP_STD_VER >= 11)
|
240 |
+
|
241 |
+
// On MSVC without C++11 hashing, adapt hash_compare (which both hashes and
// orders keys) to the hash<> interface used throughout this header.
template <typename Key>
struct hash : public GOOGLE_PROTOBUF_HASH_COMPARE<Key> {
};
|
244 |
+
|
245 |
+
// Strict ordering functor that compares C strings by their contents
// (lexicographically via strcmp) rather than by pointer identity.
// Needed because MSVC's hash_compare<const char*> hashes the characters
// but compares the raw pointers, which breaks lookups by value.
class CstringLess {
 public:
  inline bool operator()(const char* a, const char* b) const {
    const int diff = strcmp(a, b);
    return diff < 0;
  }
};
|
253 |
+
|
254 |
+
// Specialization routing C-string keys through CstringLess so that ordering
// (and therefore equality) is by contents rather than pointer identity.
template <>
struct hash<const char*>
    : public GOOGLE_PROTOBUF_HASH_COMPARE<const char*, CstringLess> {};
|
257 |
+
|
258 |
+
#ifdef GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE
|
259 |
+
|
260 |
+
// Adapter packaging a separate HashFcn and EqualKey into the single
// hash_compare-style functor that pre-2010 MSVC hash containers expect.
template <typename Key, typename HashFcn, typename EqualKey>
struct InternalHashCompare : public GOOGLE_PROTOBUF_HASH_COMPARE<Key> {
  InternalHashCompare() {}
  InternalHashCompare(HashFcn hashfcn, EqualKey equalkey)
      : hashfcn_(hashfcn), equalkey_(equalkey) {}
  // Hashing: delegate to the user-supplied hash functor.
  size_t operator()(const Key& key) const { return hashfcn_(key); }
  // Ordering: hash_compare wants a less-than predicate, but only an equality
  // predicate is available, so "not equal" stands in for "less than".
  // NOTE(review): this is not a strict weak ordering; it relies on the MSVC
  // hash containers using it only as an equality check -- confirm.
  bool operator()(const Key& key1, const Key& key2) const {
    return !equalkey_(key1, key2);
  }
  HashFcn hashfcn_;
  EqualKey equalkey_;
};
|
272 |
+
|
273 |
+
// hash_map wrapper for MSVC 2008: bridges the separate (HashFcn, EqualKey)
// interface onto the platform container, which takes one combined
// hash_compare-style functor (InternalHashCompare above).
template <typename Key, typename Data,
          typename HashFcn = hash<Key>,
          typename EqualKey = std::equal_to<Key>,
          typename Alloc = std::allocator< std::pair<const Key, Data> > >
class hash_map
    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
          Key, Data, InternalHashCompare<Key, HashFcn, EqualKey>, Alloc> {
  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
      Key, Data, InternalHashCompare<Key, HashFcn, EqualKey>, Alloc> BaseClass;

 public:
  // The bucket-count hint `a` is ignored; the hash and equality functors are
  // folded into a single InternalHashCompare for the base container.
  hash_map(int a = 0, const HashFcn& b = HashFcn(),
           const EqualKey& c = EqualKey(), const Alloc& d = Alloc())
      : BaseClass(InternalHashCompare<Key, HashFcn, EqualKey>(b, c), d) {}

  HashFcn hash_function() const { return HashFcn(); }
};
|
290 |
+
|
291 |
+
template <typename Key, typename HashFcn = hash<Key>,
|
292 |
+
typename EqualKey = std::equal_to<Key> >
|
293 |
+
class hash_set
|
294 |
+
: public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<
|
295 |
+
Key, InternalHashCompare<Key, HashFcn, EqualKey> > {
|
296 |
+
public:
|
297 |
+
hash_set(int = 0) {}
|
298 |
+
|
299 |
+
HashFcn hash_function() const { return HashFcn(); }
|
300 |
+
};
|
301 |
+
|
302 |
+
#else // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE
|
303 |
+
|
304 |
+
template <typename Key, typename Data,
|
305 |
+
typename HashFcn = hash<Key>,
|
306 |
+
typename EqualKey = std::equal_to<Key>,
|
307 |
+
typename Alloc = std::allocator< std::pair<const Key, Data> > >
|
308 |
+
class hash_map
|
309 |
+
: public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
|
310 |
+
Key, Data, HashFcn, EqualKey, Alloc> {
|
311 |
+
typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
|
312 |
+
Key, Data, HashFcn, EqualKey, Alloc> BaseClass;
|
313 |
+
|
314 |
+
public:
|
315 |
+
hash_map(int a = 0, const HashFcn& b = HashFcn(),
|
316 |
+
const EqualKey& c = EqualKey(),
|
317 |
+
const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}
|
318 |
+
|
319 |
+
HashFcn hash_function() const { return HashFcn(); }
|
320 |
+
};
|
321 |
+
|
322 |
+
template <typename Key, typename HashFcn = hash<Key>,
|
323 |
+
typename EqualKey = std::equal_to<Key> >
|
324 |
+
class hash_set
|
325 |
+
: public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<
|
326 |
+
Key, HashFcn, EqualKey> {
|
327 |
+
public:
|
328 |
+
hash_set(int = 0) {}
|
329 |
+
|
330 |
+
HashFcn hash_function() const { return HashFcn(); }
|
331 |
+
};
|
332 |
+
#endif // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE
|
333 |
+
|
334 |
+
#else // defined(_MSC_VER) && !defined(_STLPORT_VERSION)
|
335 |
+
|
336 |
+
template <typename Key>
|
337 |
+
struct hash : public GOOGLE_PROTOBUF_HASH_NAMESPACE::hash<Key> {
|
338 |
+
};
|
339 |
+
|
340 |
+
template <typename Key>
|
341 |
+
struct hash<const Key*> {
|
342 |
+
inline size_t operator()(const Key* key) const {
|
343 |
+
return reinterpret_cast<size_t>(key);
|
344 |
+
}
|
345 |
+
};
|
346 |
+
|
347 |
+
// Unlike the old SGI version, the TR1 "hash" does not special-case char*. So,
|
348 |
+
// we go ahead and provide our own implementation.
|
349 |
+
template <>
|
350 |
+
struct hash<const char*> {
|
351 |
+
inline size_t operator()(const char* str) const {
|
352 |
+
size_t result = 0;
|
353 |
+
for (; *str != '\0'; str++) {
|
354 |
+
result = 5 * result + static_cast<size_t>(*str);
|
355 |
+
}
|
356 |
+
return result;
|
357 |
+
}
|
358 |
+
};
|
359 |
+
|
360 |
+
template<>
|
361 |
+
struct hash<bool> {
|
362 |
+
size_t operator()(bool x) const {
|
363 |
+
return static_cast<size_t>(x);
|
364 |
+
}
|
365 |
+
};
|
366 |
+
|
367 |
+
template <typename Key, typename Data,
|
368 |
+
typename HashFcn = hash<Key>,
|
369 |
+
typename EqualKey = std::equal_to<Key>,
|
370 |
+
typename Alloc = std::allocator< std::pair<const Key, Data> > >
|
371 |
+
class hash_map
|
372 |
+
: public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
|
373 |
+
Key, Data, HashFcn, EqualKey, Alloc> {
|
374 |
+
typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<
|
375 |
+
Key, Data, HashFcn, EqualKey, Alloc> BaseClass;
|
376 |
+
|
377 |
+
public:
|
378 |
+
hash_map(int a = 0, const HashFcn& b = HashFcn(),
|
379 |
+
const EqualKey& c = EqualKey(),
|
380 |
+
const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}
|
381 |
+
|
382 |
+
HashFcn hash_function() const { return HashFcn(); }
|
383 |
+
};
|
384 |
+
|
385 |
+
template <typename Key, typename HashFcn = hash<Key>,
|
386 |
+
typename EqualKey = std::equal_to<Key> >
|
387 |
+
class hash_set
|
388 |
+
: public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<
|
389 |
+
Key, HashFcn, EqualKey> {
|
390 |
+
public:
|
391 |
+
hash_set(int = 0) {}
|
392 |
+
|
393 |
+
HashFcn hash_function() const { return HashFcn(); }
|
394 |
+
};
|
395 |
+
|
396 |
+
#endif // !GOOGLE_PROTOBUF_MISSING_HASH
|
397 |
+
|
398 |
+
// Hash specialization for std::string: delegates to the C-string hash of
// the underlying character data.
template <>
struct hash<string> {
  inline size_t operator()(const string& key) const {
    return hash<const char*>()(key.c_str());
  }

  // Members required by MSVC's hash_compare-style containers, which use a
  // single functor both to hash (unary call) and to order keys (binary
  // call); harmless elsewhere.
  static const size_t bucket_size = 4;
  static const size_t min_buckets = 8;
  inline bool operator()(const string& a, const string& b) const {
    return a < b;
  }
};
|
410 |
+
|
411 |
+
// Hash specialization for std::pair: combines the element hashes.
template <typename First, typename Second>
struct hash<std::pair<First, Second> > {
  inline size_t operator()(const std::pair<First, Second>& key) const {
    size_t first_hash = hash<First>()(key.first);
    size_t second_hash = hash<Second>()(key.second);

    // FIXME(kenton): What is the best way to compute this hash? I have
    // no idea! This seems a bit better than an XOR.
    return first_hash * ((1 << 16) - 1) + second_hash;
  }

  // Members required by MSVC's hash_compare-style containers, which use a
  // single functor both to hash (unary call) and to order keys (binary
  // call); harmless elsewhere.
  static const size_t bucket_size = 4;
  static const size_t min_buckets = 8;
  inline bool operator()(const std::pair<First, Second>& a,
                         const std::pair<First, Second>& b) const {
    return a < b;
  }
};
|
429 |
+
|
430 |
+
// Used by GCC/SGI STL only. (Why isn't this provided by the standard
// library? :( )
// Equality functor for C strings: compares contents, not pointers.
struct streq {
  inline bool operator()(const char* lhs, const char* rhs) const {
    return 0 == strcmp(lhs, rhs);
  }
};
|
437 |
+
|
438 |
+
} // namespace protobuf
|
439 |
+
} // namespace google
|
440 |
+
|
441 |
+
#endif // GOOGLE_PROTOBUF_STUBS_HASH_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/int128.h
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_INT128_H_
|
31 |
+
#define GOOGLE_PROTOBUF_STUBS_INT128_H_
|
32 |
+
|
33 |
+
#include <google/protobuf/stubs/common.h>
|
34 |
+
|
35 |
+
#include <iosfwd>
|
36 |
+
|
37 |
+
namespace google {
|
38 |
+
namespace protobuf {
|
39 |
+
|
40 |
+
struct uint128_pod;
|
41 |
+
|
42 |
+
// TODO(xiaofeng): Define GOOGLE_PROTOBUF_HAS_CONSTEXPR when constexpr is
|
43 |
+
// available.
|
44 |
+
#ifdef GOOGLE_PROTOBUF_HAS_CONSTEXPR
|
45 |
+
# define UINT128_CONSTEXPR constexpr
|
46 |
+
#else
|
47 |
+
# define UINT128_CONSTEXPR
|
48 |
+
#endif
|
49 |
+
|
50 |
+
// An unsigned 128-bit integer type. Thread-compatible.
|
51 |
+
class LIBPROTOBUF_EXPORT uint128 {
|
52 |
+
public:
|
53 |
+
UINT128_CONSTEXPR uint128(); // Sets to 0, but don't trust on this behavior.
|
54 |
+
UINT128_CONSTEXPR uint128(uint64 top, uint64 bottom);
|
55 |
+
#ifndef SWIG
|
56 |
+
UINT128_CONSTEXPR uint128(int bottom);
|
57 |
+
UINT128_CONSTEXPR uint128(uint32 bottom); // Top 96 bits = 0
|
58 |
+
#endif
|
59 |
+
UINT128_CONSTEXPR uint128(uint64 bottom); // hi_ = 0
|
60 |
+
UINT128_CONSTEXPR uint128(const uint128_pod &val);
|
61 |
+
|
62 |
+
// Trivial copy constructor, assignment operator and destructor.
|
63 |
+
|
64 |
+
void Initialize(uint64 top, uint64 bottom);
|
65 |
+
|
66 |
+
// Arithmetic operators.
|
67 |
+
uint128& operator+=(const uint128& b);
|
68 |
+
uint128& operator-=(const uint128& b);
|
69 |
+
uint128& operator*=(const uint128& b);
|
70 |
+
// Long division/modulo for uint128.
|
71 |
+
uint128& operator/=(const uint128& b);
|
72 |
+
uint128& operator%=(const uint128& b);
|
73 |
+
uint128 operator++(int);
|
74 |
+
uint128 operator--(int);
|
75 |
+
uint128& operator<<=(int);
|
76 |
+
uint128& operator>>=(int);
|
77 |
+
uint128& operator&=(const uint128& b);
|
78 |
+
uint128& operator|=(const uint128& b);
|
79 |
+
uint128& operator^=(const uint128& b);
|
80 |
+
uint128& operator++();
|
81 |
+
uint128& operator--();
|
82 |
+
|
83 |
+
friend uint64 Uint128Low64(const uint128& v);
|
84 |
+
friend uint64 Uint128High64(const uint128& v);
|
85 |
+
|
86 |
+
// We add "std::" to avoid including all of port.h.
|
87 |
+
LIBPROTOBUF_EXPORT friend std::ostream& operator<<(std::ostream& o,
|
88 |
+
const uint128& b);
|
89 |
+
|
90 |
+
private:
|
91 |
+
static void DivModImpl(uint128 dividend, uint128 divisor,
|
92 |
+
uint128* quotient_ret, uint128* remainder_ret);
|
93 |
+
|
94 |
+
// Little-endian memory order optimizations can benefit from
|
95 |
+
// having lo_ first, hi_ last.
|
96 |
+
// See util/endian/endian.h and Load128/Store128 for storing a uint128.
|
97 |
+
uint64 lo_;
|
98 |
+
uint64 hi_;
|
99 |
+
|
100 |
+
// Not implemented, just declared for catching automatic type conversions.
|
101 |
+
uint128(uint8);
|
102 |
+
uint128(uint16);
|
103 |
+
uint128(float v);
|
104 |
+
uint128(double v);
|
105 |
+
};
|
106 |
+
|
107 |
+
// This is a POD form of uint128 which can be used for static variables which
|
108 |
+
// need to be operated on as uint128.
|
109 |
+
struct uint128_pod {
|
110 |
+
// Note: The ordering of fields is different than 'class uint128' but the
|
111 |
+
// same as its 2-arg constructor. This enables more obvious initialization
|
112 |
+
// of static instances, which is the primary reason for this struct in the
|
113 |
+
// first place. This does not seem to defeat any optimizations wrt
|
114 |
+
// operations involving this struct.
|
115 |
+
uint64 hi;
|
116 |
+
uint64 lo;
|
117 |
+
};
|
118 |
+
|
119 |
+
LIBPROTOBUF_EXPORT extern const uint128_pod kuint128max;
|
120 |
+
|
121 |
+
// allow uint128 to be logged
|
122 |
+
LIBPROTOBUF_EXPORT extern std::ostream& operator<<(std::ostream& o,
|
123 |
+
const uint128& b);
|
124 |
+
|
125 |
+
// Methods to access low and high pieces of 128-bit value.
|
126 |
+
// Defined externally from uint128 to facilitate conversion
|
127 |
+
// to native 128-bit types when compilers support them.
|
128 |
+
inline uint64 Uint128Low64(const uint128& v) { return v.lo_; }
|
129 |
+
inline uint64 Uint128High64(const uint128& v) { return v.hi_; }
|
130 |
+
|
131 |
+
// TODO: perhaps it would be nice to have int128, a signed 128-bit type?
|
132 |
+
|
133 |
+
// --------------------------------------------------------------------------
|
134 |
+
// Implementation details follow
|
135 |
+
// --------------------------------------------------------------------------
|
136 |
+
inline bool operator==(const uint128& lhs, const uint128& rhs) {
|
137 |
+
return (Uint128Low64(lhs) == Uint128Low64(rhs) &&
|
138 |
+
Uint128High64(lhs) == Uint128High64(rhs));
|
139 |
+
}
|
140 |
+
inline bool operator!=(const uint128& lhs, const uint128& rhs) {
|
141 |
+
return !(lhs == rhs);
|
142 |
+
}
|
143 |
+
|
144 |
+
inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
|
145 |
+
inline UINT128_CONSTEXPR uint128::uint128(uint64 top, uint64 bottom)
|
146 |
+
: lo_(bottom), hi_(top) {}
|
147 |
+
inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
|
148 |
+
: lo_(v.lo), hi_(v.hi) {}
|
149 |
+
inline UINT128_CONSTEXPR uint128::uint128(uint64 bottom)
|
150 |
+
: lo_(bottom), hi_(0) {}
|
151 |
+
#ifndef SWIG
|
152 |
+
inline UINT128_CONSTEXPR uint128::uint128(uint32 bottom)
|
153 |
+
: lo_(bottom), hi_(0) {}
|
154 |
+
inline UINT128_CONSTEXPR uint128::uint128(int bottom)
|
155 |
+
: lo_(bottom), hi_(static_cast<int64>((bottom < 0) ? -1 : 0)) {}
|
156 |
+
#endif
|
157 |
+
|
158 |
+
#undef UINT128_CONSTEXPR
|
159 |
+
|
160 |
+
inline void uint128::Initialize(uint64 top, uint64 bottom) {
|
161 |
+
hi_ = top;
|
162 |
+
lo_ = bottom;
|
163 |
+
}
|
164 |
+
|
165 |
+
// Comparison operators.
|
166 |
+
|
167 |
+
#define CMP128(op) \
|
168 |
+
inline bool operator op(const uint128& lhs, const uint128& rhs) { \
|
169 |
+
return (Uint128High64(lhs) == Uint128High64(rhs)) ? \
|
170 |
+
(Uint128Low64(lhs) op Uint128Low64(rhs)) : \
|
171 |
+
(Uint128High64(lhs) op Uint128High64(rhs)); \
|
172 |
+
}
|
173 |
+
|
174 |
+
CMP128(<)
|
175 |
+
CMP128(>)
|
176 |
+
CMP128(>=)
|
177 |
+
CMP128(<=)
|
178 |
+
|
179 |
+
#undef CMP128
|
180 |
+
|
181 |
+
// Unary operators
|
182 |
+
|
183 |
+
// Unary negation: two's-complement negate (flip all bits, then add one),
// propagating the +1 carry from the low word into the high word.
inline uint128 operator-(const uint128& val) {
  const uint64 hi_flip = ~Uint128High64(val);
  const uint64 lo_flip = ~Uint128Low64(val);
  const uint64 lo_add = lo_flip + 1;
  if (lo_add < lo_flip) {
    // lo_flip + 1 wrapped around to zero, so carry one into the high word.
    return uint128(hi_flip + 1, lo_add);
  }
  return uint128(hi_flip, lo_add);
}
|
192 |
+
|
193 |
+
// Logical NOT: true iff the value is zero (both 64-bit halves are zero).
inline bool operator!(const uint128& val) {
  return !Uint128High64(val) && !Uint128Low64(val);
}
|
196 |
+
|
197 |
+
// Logical operators.
|
198 |
+
|
199 |
+
inline uint128 operator~(const uint128& val) {
|
200 |
+
return uint128(~Uint128High64(val), ~Uint128Low64(val));
|
201 |
+
}
|
202 |
+
|
203 |
+
#define LOGIC128(op) \
|
204 |
+
inline uint128 operator op(const uint128& lhs, const uint128& rhs) { \
|
205 |
+
return uint128(Uint128High64(lhs) op Uint128High64(rhs), \
|
206 |
+
Uint128Low64(lhs) op Uint128Low64(rhs)); \
|
207 |
+
}
|
208 |
+
|
209 |
+
LOGIC128(|)
|
210 |
+
LOGIC128(&)
|
211 |
+
LOGIC128(^)
|
212 |
+
|
213 |
+
#undef LOGIC128
|
214 |
+
|
215 |
+
#define LOGICASSIGN128(op) \
|
216 |
+
inline uint128& uint128::operator op(const uint128& other) { \
|
217 |
+
hi_ op other.hi_; \
|
218 |
+
lo_ op other.lo_; \
|
219 |
+
return *this; \
|
220 |
+
}
|
221 |
+
|
222 |
+
LOGICASSIGN128(|=)
|
223 |
+
LOGICASSIGN128(&=)
|
224 |
+
LOGICASSIGN128(^=)
|
225 |
+
|
226 |
+
#undef LOGICASSIGN128
|
227 |
+
|
228 |
+
// Shift operators.
|
229 |
+
|
230 |
+
// Left shift by `amount` bits.
inline uint128 operator<<(const uint128& val, int amount) {
  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
  if (amount < 64) {
    if (amount == 0) {
      // Returning early also avoids the undefined `lo >> (64 - 0)` below.
      return val;
    }
    // High word takes its own shifted bits plus the bits carried out of
    // the low word.
    uint64 new_hi = (Uint128High64(val) << amount) |
                    (Uint128Low64(val) >> (64 - amount));
    uint64 new_lo = Uint128Low64(val) << amount;
    return uint128(new_hi, new_lo);
  } else if (amount < 128) {
    // The entire low word shifts into the high word; low word becomes 0.
    return uint128(Uint128Low64(val) << (amount - 64), 0);
  } else {
    // Shifting by >= 128 bits clears the value entirely.
    return uint128(0, 0);
  }
}
|
246 |
+
|
247 |
+
// Right shift by `amount` bits (logical shift; value is unsigned).
inline uint128 operator>>(const uint128& val, int amount) {
  // uint64 shifts of >= 64 are undefined, so we will need some special-casing.
  if (amount < 64) {
    if (amount == 0) {
      // Returning early also avoids the undefined `hi << (64 - 0)` below.
      return val;
    }
    uint64 new_hi = Uint128High64(val) >> amount;
    // Low word takes its own shifted bits plus the bits carried out of
    // the high word.
    uint64 new_lo = (Uint128Low64(val) >> amount) |
                    (Uint128High64(val) << (64 - amount));
    return uint128(new_hi, new_lo);
  } else if (amount < 128) {
    // The entire high word shifts into the low word; high word becomes 0.
    return uint128(0, Uint128High64(val) >> (amount - 64));
  } else {
    // Shifting by >= 128 bits clears the value entirely.
    return uint128(0, 0);
  }
}
|
263 |
+
|
264 |
+
inline uint128& uint128::operator<<=(int amount) {
|
265 |
+
// uint64 shifts of >= 64 are undefined, so we will need some special-casing.
|
266 |
+
if (amount < 64) {
|
267 |
+
if (amount != 0) {
|
268 |
+
hi_ = (hi_ << amount) | (lo_ >> (64 - amount));
|
269 |
+
lo_ = lo_ << amount;
|
270 |
+
}
|
271 |
+
} else if (amount < 128) {
|
272 |
+
hi_ = lo_ << (amount - 64);
|
273 |
+
lo_ = 0;
|
274 |
+
} else {
|
275 |
+
hi_ = 0;
|
276 |
+
lo_ = 0;
|
277 |
+
}
|
278 |
+
return *this;
|
279 |
+
}
|
280 |
+
|
281 |
+
inline uint128& uint128::operator>>=(int amount) {
|
282 |
+
// uint64 shifts of >= 64 are undefined, so we will need some special-casing.
|
283 |
+
if (amount < 64) {
|
284 |
+
if (amount != 0) {
|
285 |
+
lo_ = (lo_ >> amount) | (hi_ << (64 - amount));
|
286 |
+
hi_ = hi_ >> amount;
|
287 |
+
}
|
288 |
+
} else if (amount < 128) {
|
289 |
+
lo_ = hi_ >> (amount - 64);
|
290 |
+
hi_ = 0;
|
291 |
+
} else {
|
292 |
+
lo_ = 0;
|
293 |
+
hi_ = 0;
|
294 |
+
}
|
295 |
+
return *this;
|
296 |
+
}
|
297 |
+
|
298 |
+
inline uint128 operator+(const uint128& lhs, const uint128& rhs) {
|
299 |
+
return uint128(lhs) += rhs;
|
300 |
+
}
|
301 |
+
|
302 |
+
inline uint128 operator-(const uint128& lhs, const uint128& rhs) {
|
303 |
+
return uint128(lhs) -= rhs;
|
304 |
+
}
|
305 |
+
|
306 |
+
inline uint128 operator*(const uint128& lhs, const uint128& rhs) {
|
307 |
+
return uint128(lhs) *= rhs;
|
308 |
+
}
|
309 |
+
|
310 |
+
inline uint128 operator/(const uint128& lhs, const uint128& rhs) {
|
311 |
+
return uint128(lhs) /= rhs;
|
312 |
+
}
|
313 |
+
|
314 |
+
inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
|
315 |
+
return uint128(lhs) %= rhs;
|
316 |
+
}
|
317 |
+
|
318 |
+
// Addition with carry: if the low-word sum wrapped around (came out
// smaller than an operand), carry one into the high word.
inline uint128& uint128::operator+=(const uint128& b) {
  hi_ += b.hi_;
  uint64 lolo = lo_ + b.lo_;
  if (lolo < lo_)
    ++hi_;
  lo_ = lolo;
  return *this;
}
|
326 |
+
|
327 |
+
// Subtraction with borrow: if the subtrahend's low word exceeds ours,
// borrow one from the high word.
inline uint128& uint128::operator-=(const uint128& b) {
  hi_ -= b.hi_;
  if (b.lo_ > lo_)
    --hi_;
  lo_ -= b.lo_;
  return *this;
}
|
334 |
+
|
335 |
+
// Schoolbook multiplication on 32-bit limbs, modulo 2^128. Each operand
// is split into four 32-bit pieces (the name suffix is the piece's bit
// offset: a96 is bits 96..127 of *this, b00 is bits 0..31 of b, etc.);
// cross products are summed and anything above bit 127 is discarded.
inline uint128& uint128::operator*=(const uint128& b) {
  uint64 a96 = hi_ >> 32;
  uint64 a64 = hi_ & 0xffffffffu;
  uint64 a32 = lo_ >> 32;
  uint64 a00 = lo_ & 0xffffffffu;
  uint64 b96 = b.hi_ >> 32;
  uint64 b64 = b.hi_ & 0xffffffffu;
  uint64 b32 = b.lo_ >> 32;
  uint64 b00 = b.lo_ & 0xffffffffu;
  // multiply [a96 .. a00] x [b96 .. b00]
  // terms higher than c96 disappear off the high side
  // terms c96 and c64 are safe to ignore carry bit
  uint64 c96 = a96 * b00 + a64 * b32 + a32 * b64 + a00 * b96;
  uint64 c64 = a64 * b00 + a32 * b32 + a00 * b64;
  // c96 contributes only its low 32 bits (the rest is above bit 127).
  this->hi_ = (c96 << 32) + c64;
  this->lo_ = 0;
  // add terms after this one at a time to capture carry
  *this += uint128(a32 * b00) << 32;
  *this += uint128(a00 * b32) << 32;
  *this += a00 * b00;
  return *this;
}
|
357 |
+
|
358 |
+
inline uint128 uint128::operator++(int) {
|
359 |
+
uint128 tmp(*this);
|
360 |
+
*this += 1;
|
361 |
+
return tmp;
|
362 |
+
}
|
363 |
+
|
364 |
+
inline uint128 uint128::operator--(int) {
|
365 |
+
uint128 tmp(*this);
|
366 |
+
*this -= 1;
|
367 |
+
return tmp;
|
368 |
+
}
|
369 |
+
|
370 |
+
inline uint128& uint128::operator++() {
|
371 |
+
*this += 1;
|
372 |
+
return *this;
|
373 |
+
}
|
374 |
+
|
375 |
+
inline uint128& uint128::operator--() {
|
376 |
+
*this -= 1;
|
377 |
+
return *this;
|
378 |
+
}
|
379 |
+
|
380 |
+
} // namespace protobuf
|
381 |
+
} // namespace google
|
382 |
+
|
383 |
+
#endif // GOOGLE_PROTOBUF_STUBS_INT128_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/io_win32.h
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Laszlo Csomor)
|
32 |
+
//
|
33 |
+
// This file contains the declarations for Windows implementations of
|
34 |
+
// commonly used POSIX functions such as open(2) and access(2), as well
|
35 |
+
// as macro definitions for flags of these functions.
|
36 |
+
//
|
37 |
+
// By including this file you'll redefine open/access/etc. to
|
38 |
+
// ::google::protobuf::internal::win32::{open/access/etc.}.
|
39 |
+
// Make sure you don't include a header that attempts to redeclare or
|
40 |
+
// redefine these functions, that'll lead to confusing compilation
|
41 |
+
// errors. It's best to #include this file as the last one to ensure that.
|
42 |
+
//
|
43 |
+
// This file is only used on Windows, it's empty on other platforms.
|
44 |
+
|
45 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__
|
46 |
+
#define GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__
|
47 |
+
|
48 |
+
#if defined(_WIN32)
|
49 |
+
|
50 |
+
#include <string>
|
51 |
+
#include <google/protobuf/stubs/port.h>
|
52 |
+
|
53 |
+
// Compilers on Windows other than MSVC (e.g. Cygwin, MinGW32) define the
|
54 |
+
// following functions already, except for mkdir.
|
55 |
+
namespace google {
|
56 |
+
namespace protobuf {
|
57 |
+
namespace internal {
|
58 |
+
namespace win32 {
|
59 |
+
|
60 |
+
LIBPROTOBUF_EXPORT FILE* fopen(const char* path, const char* mode);
|
61 |
+
LIBPROTOBUF_EXPORT int access(const char* path, int mode);
|
62 |
+
LIBPROTOBUF_EXPORT int chdir(const char* path);
|
63 |
+
LIBPROTOBUF_EXPORT int close(int fd);
|
64 |
+
LIBPROTOBUF_EXPORT int dup(int fd);
|
65 |
+
LIBPROTOBUF_EXPORT int dup2(int fd1, int fd2);
|
66 |
+
LIBPROTOBUF_EXPORT int mkdir(const char* path, int _mode);
|
67 |
+
LIBPROTOBUF_EXPORT int open(const char* path, int flags, int mode = 0);
|
68 |
+
LIBPROTOBUF_EXPORT int read(int fd, void* buffer, size_t size);
|
69 |
+
LIBPROTOBUF_EXPORT int setmode(int fd, int mode);
|
70 |
+
LIBPROTOBUF_EXPORT int stat(const char* path, struct _stat* buffer);
|
71 |
+
LIBPROTOBUF_EXPORT int write(int fd, const void* buffer, size_t size);
|
72 |
+
LIBPROTOBUF_EXPORT std::wstring testonly_utf8_to_winpath(const char* path);
|
73 |
+
|
74 |
+
namespace strings {
|
75 |
+
|
76 |
+
// Convert from UTF-16 to Active-Code-Page-encoded or to UTF-8-encoded text.
|
77 |
+
LIBPROTOBUF_EXPORT bool wcs_to_mbs(
|
78 |
+
const wchar_t* s, std::string* out, bool outUtf8);
|
79 |
+
|
80 |
+
// Convert from Active-Code-Page-encoded or UTF-8-encoded text to UTF-16.
|
81 |
+
LIBPROTOBUF_EXPORT bool mbs_to_wcs(
|
82 |
+
const char* s, std::wstring* out, bool inUtf8);
|
83 |
+
|
84 |
+
// Convert from UTF-8-encoded text to UTF-16.
|
85 |
+
LIBPROTOBUF_EXPORT bool utf8_to_wcs(const char* input, std::wstring* out);
|
86 |
+
|
87 |
+
// Convert from UTF-16-encoded text to UTF-8.
|
88 |
+
LIBPROTOBUF_EXPORT bool wcs_to_utf8(const wchar_t* input, std::string* out);
|
89 |
+
|
90 |
+
} // namespace strings
|
91 |
+
|
92 |
+
} // namespace win32
|
93 |
+
} // namespace internal
|
94 |
+
} // namespace protobuf
|
95 |
+
} // namespace google
|
96 |
+
|
97 |
+
#ifndef W_OK
|
98 |
+
#define W_OK 02 // not defined by MSVC for whatever reason
|
99 |
+
#endif
|
100 |
+
|
101 |
+
#ifndef F_OK
|
102 |
+
#define F_OK 00 // not defined by MSVC for whatever reason
|
103 |
+
#endif
|
104 |
+
|
105 |
+
#ifndef STDIN_FILENO
|
106 |
+
#define STDIN_FILENO 0
|
107 |
+
#endif
|
108 |
+
|
109 |
+
#ifndef STDOUT_FILENO
|
110 |
+
#define STDOUT_FILENO 1
|
111 |
+
#endif
|
112 |
+
|
113 |
+
#endif // defined(_WIN32)
|
114 |
+
|
115 |
+
#endif // GOOGLE_PROTOBUF_STUBS_IO_WIN32_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/logging.h
ADDED
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_LOGGING_H_
|
32 |
+
#define GOOGLE_PROTOBUF_STUBS_LOGGING_H_
|
33 |
+
|
34 |
+
#include <google/protobuf/stubs/macros.h>
|
35 |
+
#include <google/protobuf/stubs/port.h>
|
36 |
+
|
37 |
+
// ===================================================================
|
38 |
+
// emulates google3/base/logging.h
|
39 |
+
|
40 |
+
namespace google {
|
41 |
+
namespace protobuf {
|
42 |
+
|
43 |
+
enum LogLevel {
|
44 |
+
LOGLEVEL_INFO, // Informational. This is never actually used by
|
45 |
+
// libprotobuf.
|
46 |
+
LOGLEVEL_WARNING, // Warns about issues that, although not technically a
|
47 |
+
// problem now, could cause problems in the future. For
|
48 |
+
// example, a // warning will be printed when parsing a
|
49 |
+
// message that is near the message size limit.
|
50 |
+
LOGLEVEL_ERROR, // An error occurred which should never happen during
|
51 |
+
// normal use.
|
52 |
+
LOGLEVEL_FATAL, // An error occurred from which the library cannot
|
53 |
+
// recover. This usually indicates a programming error
|
54 |
+
// in the code which calls the library, especially when
|
55 |
+
// compiled in debug mode.
|
56 |
+
|
57 |
+
#ifdef NDEBUG
|
58 |
+
LOGLEVEL_DFATAL = LOGLEVEL_ERROR
|
59 |
+
#else
|
60 |
+
LOGLEVEL_DFATAL = LOGLEVEL_FATAL
|
61 |
+
#endif
|
62 |
+
};
|
63 |
+
|
64 |
+
class StringPiece;
|
65 |
+
namespace util {
|
66 |
+
class Status;
|
67 |
+
}
|
68 |
+
class uint128;
|
69 |
+
namespace internal {
|
70 |
+
|
71 |
+
class LogFinisher;
|
72 |
+
|
73 |
+
class LIBPROTOBUF_EXPORT LogMessage {
|
74 |
+
public:
|
75 |
+
LogMessage(LogLevel level, const char* filename, int line);
|
76 |
+
~LogMessage();
|
77 |
+
|
78 |
+
LogMessage& operator<<(const std::string& value);
|
79 |
+
LogMessage& operator<<(const char* value);
|
80 |
+
LogMessage& operator<<(char value);
|
81 |
+
LogMessage& operator<<(int value);
|
82 |
+
LogMessage& operator<<(uint value);
|
83 |
+
LogMessage& operator<<(long value);
|
84 |
+
LogMessage& operator<<(unsigned long value);
|
85 |
+
LogMessage& operator<<(long long value);
|
86 |
+
LogMessage& operator<<(unsigned long long value);
|
87 |
+
LogMessage& operator<<(double value);
|
88 |
+
LogMessage& operator<<(void* value);
|
89 |
+
LogMessage& operator<<(const StringPiece& value);
|
90 |
+
LogMessage& operator<<(const ::google::protobuf::util::Status& status);
|
91 |
+
LogMessage& operator<<(const uint128& value);
|
92 |
+
|
93 |
+
private:
|
94 |
+
friend class LogFinisher;
|
95 |
+
void Finish();
|
96 |
+
|
97 |
+
LogLevel level_;
|
98 |
+
const char* filename_;
|
99 |
+
int line_;
|
100 |
+
std::string message_;
|
101 |
+
};
|
102 |
+
|
103 |
+
// Used to make the entire "LOG(BLAH) << etc." expression have a void return
|
104 |
+
// type and print a newline after each message.
|
105 |
+
class LIBPROTOBUF_EXPORT LogFinisher {
|
106 |
+
public:
|
107 |
+
void operator=(LogMessage& other);
|
108 |
+
};
|
109 |
+
|
110 |
+
template<typename T>
|
111 |
+
bool IsOk(T status) { return status.ok(); }
|
112 |
+
template<>
|
113 |
+
inline bool IsOk(bool status) { return status; }
|
114 |
+
|
115 |
+
} // namespace internal
|
116 |
+
|
117 |
+
// Undef everything in case we're being mixed with some other Google library
|
118 |
+
// which already defined them itself. Presumably all Google libraries will
|
119 |
+
// support the same syntax for these so it should not be a big deal if they
|
120 |
+
// end up using our definitions instead.
|
121 |
+
#undef GOOGLE_LOG
|
122 |
+
#undef GOOGLE_LOG_IF
|
123 |
+
|
124 |
+
#undef GOOGLE_CHECK
|
125 |
+
#undef GOOGLE_CHECK_OK
|
126 |
+
#undef GOOGLE_CHECK_EQ
|
127 |
+
#undef GOOGLE_CHECK_NE
|
128 |
+
#undef GOOGLE_CHECK_LT
|
129 |
+
#undef GOOGLE_CHECK_LE
|
130 |
+
#undef GOOGLE_CHECK_GT
|
131 |
+
#undef GOOGLE_CHECK_GE
|
132 |
+
#undef GOOGLE_CHECK_NOTNULL
|
133 |
+
|
134 |
+
#undef GOOGLE_DLOG
|
135 |
+
#undef GOOGLE_DCHECK
|
136 |
+
#undef GOOGLE_DCHECK_OK
|
137 |
+
#undef GOOGLE_DCHECK_EQ
|
138 |
+
#undef GOOGLE_DCHECK_NE
|
139 |
+
#undef GOOGLE_DCHECK_LT
|
140 |
+
#undef GOOGLE_DCHECK_LE
|
141 |
+
#undef GOOGLE_DCHECK_GT
|
142 |
+
#undef GOOGLE_DCHECK_GE
|
143 |
+
|
144 |
+
#define GOOGLE_LOG(LEVEL) \
|
145 |
+
::google::protobuf::internal::LogFinisher() = \
|
146 |
+
::google::protobuf::internal::LogMessage( \
|
147 |
+
::google::protobuf::LOGLEVEL_##LEVEL, __FILE__, __LINE__)
|
148 |
+
#define GOOGLE_LOG_IF(LEVEL, CONDITION) \
|
149 |
+
!(CONDITION) ? (void)0 : GOOGLE_LOG(LEVEL)
|
150 |
+
|
151 |
+
#define GOOGLE_CHECK(EXPRESSION) \
|
152 |
+
GOOGLE_LOG_IF(FATAL, !(EXPRESSION)) << "CHECK failed: " #EXPRESSION ": "
|
153 |
+
#define GOOGLE_CHECK_OK(A) GOOGLE_CHECK(::google::protobuf::internal::IsOk(A))
|
154 |
+
#define GOOGLE_CHECK_EQ(A, B) GOOGLE_CHECK((A) == (B))
|
155 |
+
#define GOOGLE_CHECK_NE(A, B) GOOGLE_CHECK((A) != (B))
|
156 |
+
#define GOOGLE_CHECK_LT(A, B) GOOGLE_CHECK((A) < (B))
|
157 |
+
#define GOOGLE_CHECK_LE(A, B) GOOGLE_CHECK((A) <= (B))
|
158 |
+
#define GOOGLE_CHECK_GT(A, B) GOOGLE_CHECK((A) > (B))
|
159 |
+
#define GOOGLE_CHECK_GE(A, B) GOOGLE_CHECK((A) >= (B))
|
160 |
+
|
161 |
+
namespace internal {
|
162 |
+
template<typename T>
|
163 |
+
T* CheckNotNull(const char* /* file */, int /* line */,
|
164 |
+
const char* name, T* val) {
|
165 |
+
if (val == NULL) {
|
166 |
+
GOOGLE_LOG(FATAL) << name;
|
167 |
+
}
|
168 |
+
return val;
|
169 |
+
}
|
170 |
+
} // namespace internal
|
171 |
+
#define GOOGLE_CHECK_NOTNULL(A) \
|
172 |
+
::google::protobuf::internal::CheckNotNull(\
|
173 |
+
__FILE__, __LINE__, "'" #A "' must not be NULL", (A))
|
174 |
+
|
175 |
+
#ifdef NDEBUG
|
176 |
+
|
177 |
+
#define GOOGLE_DLOG(LEVEL) GOOGLE_LOG_IF(LEVEL, false)
|
178 |
+
|
179 |
+
#define GOOGLE_DCHECK(EXPRESSION) while(false) GOOGLE_CHECK(EXPRESSION)
|
180 |
+
#define GOOGLE_DCHECK_OK(E) GOOGLE_DCHECK(::google::protobuf::internal::IsOk(E))
|
181 |
+
#define GOOGLE_DCHECK_EQ(A, B) GOOGLE_DCHECK((A) == (B))
|
182 |
+
#define GOOGLE_DCHECK_NE(A, B) GOOGLE_DCHECK((A) != (B))
|
183 |
+
#define GOOGLE_DCHECK_LT(A, B) GOOGLE_DCHECK((A) < (B))
|
184 |
+
#define GOOGLE_DCHECK_LE(A, B) GOOGLE_DCHECK((A) <= (B))
|
185 |
+
#define GOOGLE_DCHECK_GT(A, B) GOOGLE_DCHECK((A) > (B))
|
186 |
+
#define GOOGLE_DCHECK_GE(A, B) GOOGLE_DCHECK((A) >= (B))
|
187 |
+
|
188 |
+
#else // NDEBUG
|
189 |
+
|
190 |
+
#define GOOGLE_DLOG GOOGLE_LOG
|
191 |
+
|
192 |
+
#define GOOGLE_DCHECK GOOGLE_CHECK
|
193 |
+
#define GOOGLE_DCHECK_OK GOOGLE_CHECK_OK
|
194 |
+
#define GOOGLE_DCHECK_EQ GOOGLE_CHECK_EQ
|
195 |
+
#define GOOGLE_DCHECK_NE GOOGLE_CHECK_NE
|
196 |
+
#define GOOGLE_DCHECK_LT GOOGLE_CHECK_LT
|
197 |
+
#define GOOGLE_DCHECK_LE GOOGLE_CHECK_LE
|
198 |
+
#define GOOGLE_DCHECK_GT GOOGLE_CHECK_GT
|
199 |
+
#define GOOGLE_DCHECK_GE GOOGLE_CHECK_GE
|
200 |
+
|
201 |
+
#endif // !NDEBUG
|
202 |
+
|
203 |
+
typedef void LogHandler(LogLevel level, const char* filename, int line,
|
204 |
+
const std::string& message);
|
205 |
+
|
206 |
+
// The protobuf library sometimes writes warning and error messages to
|
207 |
+
// stderr. These messages are primarily useful for developers, but may
|
208 |
+
// also help end users figure out a problem. If you would prefer that
|
209 |
+
// these messages be sent somewhere other than stderr, call SetLogHandler()
|
210 |
+
// to set your own handler. This returns the old handler. Set the handler
|
211 |
+
// to NULL to ignore log messages (but see also LogSilencer, below).
|
212 |
+
//
|
213 |
+
// Obviously, SetLogHandler is not thread-safe. You should only call it
|
214 |
+
// at initialization time, and probably not from library code. If you
|
215 |
+
// simply want to suppress log messages temporarily (e.g. because you
|
216 |
+
// have some code that tends to trigger them frequently and you know
|
217 |
+
// the warnings are not important to you), use the LogSilencer class
|
218 |
+
// below.
|
219 |
+
LIBPROTOBUF_EXPORT LogHandler* SetLogHandler(LogHandler* new_func);
|
220 |
+
|
221 |
+
// Create a LogSilencer if you want to temporarily suppress all log
|
222 |
+
// messages. As long as any LogSilencer objects exist, non-fatal
|
223 |
+
// log messages will be discarded (the current LogHandler will *not*
|
224 |
+
// be called). Constructing a LogSilencer is thread-safe. You may
|
225 |
+
// accidentally suppress log messages occurring in another thread, but
|
226 |
+
// since messages are generally for debugging purposes only, this isn't
|
227 |
+
// a big deal. If you want to intercept log messages, use SetLogHandler().
|
228 |
+
class LIBPROTOBUF_EXPORT LogSilencer {
|
229 |
+
public:
|
230 |
+
LogSilencer();
|
231 |
+
~LogSilencer();
|
232 |
+
};
|
233 |
+
|
234 |
+
} // namespace protobuf
|
235 |
+
} // namespace google
|
236 |
+
|
237 |
+
#endif // GOOGLE_PROTOBUF_STUBS_LOGGING_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/macros.h
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
#ifndef GOOGLE_PROTOBUF_MACROS_H__
|
32 |
+
#define GOOGLE_PROTOBUF_MACROS_H__
|
33 |
+
|
34 |
+
#include <google/protobuf/stubs/port.h>
|
35 |
+
|
36 |
+
namespace google {
|
37 |
+
namespace protobuf {
|
38 |
+
|
39 |
+
#undef GOOGLE_DISALLOW_EVIL_CONSTRUCTORS
|
40 |
+
#define GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
|
41 |
+
TypeName(const TypeName&); \
|
42 |
+
void operator=(const TypeName&)
|
43 |
+
|
44 |
+
#undef GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS
|
45 |
+
#define GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
|
46 |
+
TypeName(); \
|
47 |
+
TypeName(const TypeName&); \
|
48 |
+
void operator=(const TypeName&)
|
49 |
+
|
50 |
+
// ===================================================================
|
51 |
+
// from google3/base/basictypes.h
|
52 |
+
|
53 |
+
// The GOOGLE_ARRAYSIZE(arr) macro returns the # of elements in an array arr.
|
54 |
+
// The expression is a compile-time constant, and therefore can be
|
55 |
+
// used in defining new arrays, for example.
|
56 |
+
//
|
57 |
+
// GOOGLE_ARRAYSIZE catches a few type errors. If you see a compiler error
|
58 |
+
//
|
59 |
+
// "warning: division by zero in ..."
|
60 |
+
//
|
61 |
+
// when using GOOGLE_ARRAYSIZE, you are (wrongfully) giving it a pointer.
|
62 |
+
// You should only use GOOGLE_ARRAYSIZE on statically allocated arrays.
|
63 |
+
//
|
64 |
+
// The following comments are on the implementation details, and can
|
65 |
+
// be ignored by the users.
|
66 |
+
//
|
67 |
+
// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
|
68 |
+
// the array) and sizeof(*(arr)) (the # of bytes in one array
|
69 |
+
// element). If the former is divisible by the latter, perhaps arr is
|
70 |
+
// indeed an array, in which case the division result is the # of
|
71 |
+
// elements in the array. Otherwise, arr cannot possibly be an array,
|
72 |
+
// and we generate a compiler error to prevent the code from
|
73 |
+
// compiling.
|
74 |
+
//
|
75 |
+
// Since the size of bool is implementation-defined, we need to cast
|
76 |
+
// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
|
77 |
+
// result has type size_t.
|
78 |
+
//
|
79 |
+
// This macro is not perfect as it wrongfully accepts certain
|
80 |
+
// pointers, namely where the pointer size is divisible by the pointee
|
81 |
+
// size. Since all our code has to go through a 32-bit compiler,
|
82 |
+
// where a pointer is 4 bytes, this means all pointers to a type whose
|
83 |
+
// size is 3 or greater than 4 will be (righteously) rejected.
|
84 |
+
//
|
85 |
+
// Kudos to Jorg Brown for this simple and elegant implementation.
|
86 |
+
|
87 |
+
#undef GOOGLE_ARRAYSIZE
|
88 |
+
#define GOOGLE_ARRAYSIZE(a) \
|
89 |
+
((sizeof(a) / sizeof(*(a))) / \
|
90 |
+
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
|
91 |
+
|
92 |
+
// The COMPILE_ASSERT macro can be used to verify that a compile time
|
93 |
+
// expression is true. For example, you could use it to verify the
|
94 |
+
// size of a static array:
|
95 |
+
//
|
96 |
+
// COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
|
97 |
+
// content_type_names_incorrect_size);
|
98 |
+
//
|
99 |
+
// or to make sure a struct is smaller than a certain size:
|
100 |
+
//
|
101 |
+
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
|
102 |
+
//
|
103 |
+
// The second argument to the macro is the name of the variable. If
|
104 |
+
// the expression is false, most compilers will issue a warning/error
|
105 |
+
// containing the name of the variable.
|
106 |
+
|
107 |
+
namespace internal {
|
108 |
+
|
109 |
+
template <bool>
|
110 |
+
struct CompileAssert {
|
111 |
+
};
|
112 |
+
|
113 |
+
} // namespace internal
|
114 |
+
|
115 |
+
#undef GOOGLE_COMPILE_ASSERT
|
116 |
+
#if __cplusplus >= 201103L
|
117 |
+
#define GOOGLE_COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
|
118 |
+
#else
|
119 |
+
#define GOOGLE_COMPILE_ASSERT(expr, msg) \
|
120 |
+
::google::protobuf::internal::CompileAssert<(bool(expr))> \
|
121 |
+
msg[bool(expr) ? 1 : -1]; \
|
122 |
+
(void)msg
|
123 |
+
// Implementation details of COMPILE_ASSERT:
|
124 |
+
//
|
125 |
+
// - COMPILE_ASSERT works by defining an array type that has -1
|
126 |
+
// elements (and thus is invalid) when the expression is false.
|
127 |
+
//
|
128 |
+
// - The simpler definition
|
129 |
+
//
|
130 |
+
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
|
131 |
+
//
|
132 |
+
// does not work, as gcc supports variable-length arrays whose sizes
|
133 |
+
// are determined at run-time (this is gcc's extension and not part
|
134 |
+
// of the C++ standard). As a result, gcc fails to reject the
|
135 |
+
// following code with the simple definition:
|
136 |
+
//
|
137 |
+
// int foo;
|
138 |
+
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
|
139 |
+
// // not a compile-time constant.
|
140 |
+
//
|
141 |
+
// - By using the type CompileAssert<(bool(expr))>, we ensures that
|
142 |
+
// expr is a compile-time constant. (Template arguments must be
|
143 |
+
// determined at compile-time.)
|
144 |
+
//
|
145 |
+
// - The outter parentheses in CompileAssert<(bool(expr))> are necessary
|
146 |
+
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
|
147 |
+
//
|
148 |
+
// CompileAssert<bool(expr)>
|
149 |
+
//
|
150 |
+
// instead, these compilers will refuse to compile
|
151 |
+
//
|
152 |
+
// COMPILE_ASSERT(5 > 0, some_message);
|
153 |
+
//
|
154 |
+
// (They seem to think the ">" in "5 > 0" marks the end of the
|
155 |
+
// template argument list.)
|
156 |
+
//
|
157 |
+
// - The array size is (bool(expr) ? 1 : -1), instead of simply
|
158 |
+
//
|
159 |
+
// ((expr) ? 1 : -1).
|
160 |
+
//
|
161 |
+
// This is to avoid running into a bug in MS VC 7.1, which
|
162 |
+
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
|
163 |
+
#endif // __cplusplus >= 201103L
|
164 |
+
|
165 |
+
} // namespace protobuf
|
166 |
+
} // namespace google
|
167 |
+
|
168 |
+
#endif // GOOGLE_PROTOBUF_MACROS_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/map_util.h
ADDED
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2014 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// from google3/util/gtl/map_util.h
|
32 |
+
// Author: Anton Carver
|
33 |
+
|
34 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
|
35 |
+
#define GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
|
36 |
+
|
37 |
+
#include <stddef.h>
|
38 |
+
#include <iterator>
|
39 |
+
#include <string>
|
40 |
+
#include <utility>
|
41 |
+
#include <vector>
|
42 |
+
|
43 |
+
#include <google/protobuf/stubs/common.h>
|
44 |
+
|
45 |
+
namespace google {
|
46 |
+
namespace protobuf {
|
47 |
+
namespace internal {
|
48 |
+
// Local implementation of RemoveConst to avoid including base/type_traits.h.
|
49 |
+
template <class T> struct RemoveConst { typedef T type; };
|
50 |
+
template <class T> struct RemoveConst<const T> : RemoveConst<T> {};
|
51 |
+
} // namespace internal
|
52 |
+
|
53 |
+
//
|
54 |
+
// Find*()
|
55 |
+
//
|
56 |
+
|
57 |
+
// Returns a const reference to the value associated with the given key if it
|
58 |
+
// exists. Crashes otherwise.
|
59 |
+
//
|
60 |
+
// This is intended as a replacement for operator[] as an rvalue (for reading)
|
61 |
+
// when the key is guaranteed to exist.
|
62 |
+
//
|
63 |
+
// operator[] for lookup is discouraged for several reasons:
|
64 |
+
// * It has a side-effect of inserting missing keys
|
65 |
+
// * It is not thread-safe (even when it is not inserting, it can still
|
66 |
+
// choose to resize the underlying storage)
|
67 |
+
// * It invalidates iterators (when it chooses to resize)
|
68 |
+
// * It default constructs a value object even if it doesn't need to
|
69 |
+
//
|
70 |
+
// This version assumes the key is printable, and includes it in the fatal log
|
71 |
+
// message.
|
72 |
+
template <class Collection>
|
73 |
+
const typename Collection::value_type::second_type&
|
74 |
+
FindOrDie(const Collection& collection,
|
75 |
+
const typename Collection::value_type::first_type& key) {
|
76 |
+
typename Collection::const_iterator it = collection.find(key);
|
77 |
+
GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key;
|
78 |
+
return it->second;
|
79 |
+
}
|
80 |
+
|
81 |
+
// Same as above, but returns a non-const reference.
|
82 |
+
template <class Collection>
|
83 |
+
typename Collection::value_type::second_type&
|
84 |
+
FindOrDie(Collection& collection, // NOLINT
|
85 |
+
const typename Collection::value_type::first_type& key) {
|
86 |
+
typename Collection::iterator it = collection.find(key);
|
87 |
+
GOOGLE_CHECK(it != collection.end()) << "Map key not found: " << key;
|
88 |
+
return it->second;
|
89 |
+
}
|
90 |
+
|
91 |
+
// Same as FindOrDie above, but doesn't log the key on failure.
|
92 |
+
template <class Collection>
|
93 |
+
const typename Collection::value_type::second_type&
|
94 |
+
FindOrDieNoPrint(const Collection& collection,
|
95 |
+
const typename Collection::value_type::first_type& key) {
|
96 |
+
typename Collection::const_iterator it = collection.find(key);
|
97 |
+
GOOGLE_CHECK(it != collection.end()) << "Map key not found";
|
98 |
+
return it->second;
|
99 |
+
}
|
100 |
+
|
101 |
+
// Same as above, but returns a non-const reference.
|
102 |
+
template <class Collection>
|
103 |
+
typename Collection::value_type::second_type&
|
104 |
+
FindOrDieNoPrint(Collection& collection, // NOLINT
|
105 |
+
const typename Collection::value_type::first_type& key) {
|
106 |
+
typename Collection::iterator it = collection.find(key);
|
107 |
+
GOOGLE_CHECK(it != collection.end()) << "Map key not found";
|
108 |
+
return it->second;
|
109 |
+
}
|
110 |
+
|
111 |
+
// Returns a const reference to the value associated with the given key if it
|
112 |
+
// exists, otherwise returns a const reference to the provided default value.
|
113 |
+
//
|
114 |
+
// WARNING: If a temporary object is passed as the default "value,"
|
115 |
+
// this function will return a reference to that temporary object,
|
116 |
+
// which will be destroyed at the end of the statement. A common
|
117 |
+
// example: if you have a map with string values, and you pass a char*
|
118 |
+
// as the default "value," either use the returned value immediately
|
119 |
+
// or store it in a string (not string&).
|
120 |
+
// Details: http://go/findwithdefault
|
121 |
+
template <class Collection>
|
122 |
+
const typename Collection::value_type::second_type&
|
123 |
+
FindWithDefault(const Collection& collection,
|
124 |
+
const typename Collection::value_type::first_type& key,
|
125 |
+
const typename Collection::value_type::second_type& value) {
|
126 |
+
typename Collection::const_iterator it = collection.find(key);
|
127 |
+
if (it == collection.end()) {
|
128 |
+
return value;
|
129 |
+
}
|
130 |
+
return it->second;
|
131 |
+
}
|
132 |
+
|
133 |
+
// Returns a pointer to the const value associated with the given key if it
|
134 |
+
// exists, or NULL otherwise.
|
135 |
+
template <class Collection>
|
136 |
+
const typename Collection::value_type::second_type*
|
137 |
+
FindOrNull(const Collection& collection,
|
138 |
+
const typename Collection::value_type::first_type& key) {
|
139 |
+
typename Collection::const_iterator it = collection.find(key);
|
140 |
+
if (it == collection.end()) {
|
141 |
+
return 0;
|
142 |
+
}
|
143 |
+
return &it->second;
|
144 |
+
}
|
145 |
+
|
146 |
+
// Same as above but returns a pointer to the non-const value.
|
147 |
+
template <class Collection>
|
148 |
+
typename Collection::value_type::second_type*
|
149 |
+
FindOrNull(Collection& collection, // NOLINT
|
150 |
+
const typename Collection::value_type::first_type& key) {
|
151 |
+
typename Collection::iterator it = collection.find(key);
|
152 |
+
if (it == collection.end()) {
|
153 |
+
return 0;
|
154 |
+
}
|
155 |
+
return &it->second;
|
156 |
+
}
|
157 |
+
|
158 |
+
// Returns the pointer value associated with the given key. If none is found,
|
159 |
+
// NULL is returned. The function is designed to be used with a map of keys to
|
160 |
+
// pointers.
|
161 |
+
//
|
162 |
+
// This function does not distinguish between a missing key and a key mapped
|
163 |
+
// to a NULL value.
|
164 |
+
template <class Collection>
|
165 |
+
typename Collection::value_type::second_type
|
166 |
+
FindPtrOrNull(const Collection& collection,
|
167 |
+
const typename Collection::value_type::first_type& key) {
|
168 |
+
typename Collection::const_iterator it = collection.find(key);
|
169 |
+
if (it == collection.end()) {
|
170 |
+
return typename Collection::value_type::second_type();
|
171 |
+
}
|
172 |
+
return it->second;
|
173 |
+
}
|
174 |
+
|
175 |
+
// Same as above, except takes non-const reference to collection.
|
176 |
+
//
|
177 |
+
// This function is needed for containers that propagate constness to the
|
178 |
+
// pointee, such as boost::ptr_map.
|
179 |
+
template <class Collection>
|
180 |
+
typename Collection::value_type::second_type
|
181 |
+
FindPtrOrNull(Collection& collection, // NOLINT
|
182 |
+
const typename Collection::value_type::first_type& key) {
|
183 |
+
typename Collection::iterator it = collection.find(key);
|
184 |
+
if (it == collection.end()) {
|
185 |
+
return typename Collection::value_type::second_type();
|
186 |
+
}
|
187 |
+
return it->second;
|
188 |
+
}
|
189 |
+
|
190 |
+
// Finds the pointer value associated with the given key in a map whose values
|
191 |
+
// are linked_ptrs. Returns NULL if key is not found.
|
192 |
+
template <class Collection>
|
193 |
+
typename Collection::value_type::second_type::element_type*
|
194 |
+
FindLinkedPtrOrNull(const Collection& collection,
|
195 |
+
const typename Collection::value_type::first_type& key) {
|
196 |
+
typename Collection::const_iterator it = collection.find(key);
|
197 |
+
if (it == collection.end()) {
|
198 |
+
return 0;
|
199 |
+
}
|
200 |
+
// Since linked_ptr::get() is a const member returning a non const,
|
201 |
+
// we do not need a version of this function taking a non const collection.
|
202 |
+
return it->second.get();
|
203 |
+
}
|
204 |
+
|
205 |
+
// Same as above, but dies if the key is not found.
|
206 |
+
template <class Collection>
|
207 |
+
typename Collection::value_type::second_type::element_type&
|
208 |
+
FindLinkedPtrOrDie(const Collection& collection,
|
209 |
+
const typename Collection::value_type::first_type& key) {
|
210 |
+
typename Collection::const_iterator it = collection.find(key);
|
211 |
+
GOOGLE_CHECK(it != collection.end()) << "key not found: " << key;
|
212 |
+
// Since linked_ptr::operator*() is a const member returning a non const,
|
213 |
+
// we do not need a version of this function taking a non const collection.
|
214 |
+
return *it->second;
|
215 |
+
}
|
216 |
+
|
217 |
+
// Finds the value associated with the given key and copies it to *value (if not
|
218 |
+
// NULL). Returns false if the key was not found, true otherwise.
|
219 |
+
template <class Collection, class Key, class Value>
|
220 |
+
bool FindCopy(const Collection& collection,
|
221 |
+
const Key& key,
|
222 |
+
Value* const value) {
|
223 |
+
typename Collection::const_iterator it = collection.find(key);
|
224 |
+
if (it == collection.end()) {
|
225 |
+
return false;
|
226 |
+
}
|
227 |
+
if (value) {
|
228 |
+
*value = it->second;
|
229 |
+
}
|
230 |
+
return true;
|
231 |
+
}
|
232 |
+
|
233 |
+
//
|
234 |
+
// Contains*()
|
235 |
+
//
|
236 |
+
|
237 |
+
// Returns true if and only if the given collection contains the given key.
|
238 |
+
template <class Collection, class Key>
|
239 |
+
bool ContainsKey(const Collection& collection, const Key& key) {
|
240 |
+
return collection.find(key) != collection.end();
|
241 |
+
}
|
242 |
+
|
243 |
+
// True iff `collection` holds at least one entry whose key equals `key`
// and whose mapped value equals `value` (also works with multimaps).
template <class Collection, class Key, class Value>
bool ContainsKeyValuePair(const Collection& collection,
                          const Key& key,
                          const Value& value) {
  typedef typename Collection::const_iterator Iter;
  std::pair<Iter, Iter> matches = collection.equal_range(key);
  for (Iter pos = matches.first; pos != matches.second; ++pos) {
    if (pos->second == value) return true;
  }
  return false;
}
|
258 |
+
|
259 |
+
//
|
260 |
+
// Insert*()
|
261 |
+
//
|
262 |
+
|
263 |
+
// Stores `vt` in the map. Returns true when the key was newly inserted;
// when the key already existed its mapped value is overwritten with
// vt.second and false is returned.
template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type& vt) {
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(vt);
  if (result.second) return true;
  result.first->second = vt.second;  // Key already present: overwrite.
  return false;
}
|
277 |
+
|
278 |
+
// Overload of InsertOrUpdate() taking the key and value separately.
// (The insert/overwrite logic of the pair overload is performed inline.)
template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type::first_type& key,
                    const typename Collection::value_type::second_type& value) {
  typedef typename Collection::value_type Entry;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(Entry(key, value));
  if (result.second) return true;
  result.first->second = value;  // Key already present: overwrite.
  return false;
}
|
286 |
+
|
287 |
+
// Inserts or overwrites every entry in [first, last) into `collection`.
template <class Collection, class InputIterator>
void InsertOrUpdateMany(Collection* const collection,
                        InputIterator first, InputIterator last) {
  for (; first != last; ++first) {
    std::pair<typename Collection::iterator, bool> result =
        collection->insert(*first);
    if (!result.second) {
      result.first->second = first->second;  // Overwrite existing entry.
    }
  }
}
|
296 |
+
|
297 |
+
// For maps of owning raw pointers (map<Key, Value*>): stores `value` under
// `key`, deleting any pointer previously stored there. Returns true on a
// fresh insert, false when an old value was replaced (and deleted).
template <class Collection>
bool InsertAndDeleteExisting(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, value));
  if (result.second) return true;
  delete result.first->second;  // Free the object being displaced.
  result.first->second = value;
  return false;
}
|
315 |
+
|
316 |
+
// Inserts `vt` only when its key is absent; an existing mapping is left
// untouched. Returns true iff the insert actually happened.
template <class Collection>
bool InsertIfNotPresent(Collection* const collection,
                        const typename Collection::value_type& vt) {
  return collection->insert(vt).second;
}
|
325 |
+
|
326 |
+
// Overload of InsertIfNotPresent() taking the key and value separately.
template <class Collection>
bool InsertIfNotPresent(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  return collection
      ->insert(typename Collection::value_type(key, value))
      .second;
}
|
335 |
+
|
336 |
+
// Inserts `value`; aborts (via GOOGLE_CHECK, logging the value) when an
// equal value is already present.
template <class Collection>
void InsertOrDie(Collection* const collection,
                 const typename Collection::value_type& value) {
  GOOGLE_CHECK(InsertIfNotPresent(collection, value))
      << "duplicate value: " << value;
}
|
343 |
+
|
344 |
+
// Like InsertOrDie(), but the failure message omits the offending value
// (useful when the value type is not streamable).
template <class Collection>
void InsertOrDieNoPrint(Collection* const collection,
                        const typename Collection::value_type& value) {
  GOOGLE_CHECK(InsertIfNotPresent(collection, value)) << "duplicate value.";
}
|
350 |
+
|
351 |
+
// Inserts the (key, data) pair; aborts (via GOOGLE_CHECK, logging the key)
// when the key is already present.
template <class Collection>
void InsertOrDie(Collection* const collection,
                 const typename Collection::value_type::first_type& key,
                 const typename Collection::value_type::second_type& data) {
  GOOGLE_CHECK(InsertIfNotPresent(collection, key, data))
      << "duplicate key: " << key;
}
|
360 |
+
|
361 |
+
// Like the keyed InsertOrDie(), but the failure message omits the key
// (useful when the key type is not streamable).
template <class Collection>
void InsertOrDieNoPrint(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& data) {
  GOOGLE_CHECK(InsertIfNotPresent(collection, key, data)) << "duplicate key.";
}
|
369 |
+
|
370 |
+
// Inserts `key` mapped to a value-initialized value and returns a reference
// to that value; aborts (via GOOGLE_CHECK) when the key already exists.
// Example:
//
//   map<int, SomeProto> m;
//   SomeProto& proto = InsertKeyOrDie(&m, 3);
//   proto.set_field("foo");
template <class Collection>
typename Collection::value_type::second_type& InsertKeyOrDie(
    Collection* const collection,
    const typename Collection::value_type::first_type& key) {
  typedef typename Collection::value_type Entry;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(Entry(key, typename Entry::second_type()));
  GOOGLE_CHECK(result.second) << "duplicate key: " << key;
  return result.first->second;
}
|
386 |
+
|
387 |
+
//
|
388 |
+
// Lookup*()
|
389 |
+
//
|
390 |
+
|
391 |
+
// Returns a reference to the value mapped to vt.first, inserting `vt` first
// when that key was absent.
template <class Collection>
typename Collection::value_type::second_type&
LookupOrInsert(Collection* const collection,
               const typename Collection::value_type& vt) {
  return collection->insert(vt).first->second;
}
|
400 |
+
|
401 |
+
// Overload of LookupOrInsert() taking the key and value separately.
template <class Collection>
typename Collection::value_type::second_type&
LookupOrInsert(Collection* const collection,
               const typename Collection::value_type::first_type& key,
               const typename Collection::value_type::second_type& value) {
  return collection
      ->insert(typename Collection::value_type(key, value))
      .first->second;
}
|
410 |
+
|
411 |
+
// Tallies the elements of `sequence` into `count_map`: each distinct element
// becomes a key whose mapped value is bumped by `increment` per occurrence.
//
// Example:
//   vector<string> v = {"a", "b", "c", "a", "b"};
//   map<string, int> m;
//   AddTokenCounts(v, 1, &m);
//   assert(m["a"] == 2);
//   assert(m["b"] == 2);
//   assert(m["c"] == 1);
template <typename Sequence, typename Collection>
void AddTokenCounts(
    const Sequence& sequence,
    const typename Collection::value_type::second_type& increment,
    Collection* const count_map) {
  typedef typename Collection::value_type Entry;
  typedef typename Entry::second_type Count;
  for (typename Sequence::const_iterator token = sequence.begin();
       token != sequence.end(); ++token) {
    // insert() leaves an existing entry alone and returns its iterator, so
    // one lookup both creates missing counters (value-initialized) and
    // finds existing ones.
    count_map->insert(Entry(*token, Count())).first->second += increment;
  }
}
|
434 |
+
|
435 |
+
// For maps of owning raw pointers (map<Key, Value*>): returns a reference
// to the pointer stored under `key`, first heap-allocating a
// default-constructed object when the key was absent.
template <class Collection>
typename Collection::value_type::second_type&
LookupOrInsertNew(Collection* const collection,
                  const typename Collection::value_type::first_type& key) {
  // iterator_traits<T*>::value_type recovers the pointee type T.
  typedef typename std::iterator_traits<
      typename Collection::value_type::second_type>::value_type Element;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(
          key,
          static_cast<typename Collection::value_type::second_type>(NULL)));
  if (result.second) {
    result.first->second = new Element();
  }
  return result.first->second;
}
|
456 |
+
|
457 |
+
// Like LookupOrInsertNew() above, but a newly allocated element is built
// with the single-argument constructor taking `arg`.
template <class Collection, class Arg>
typename Collection::value_type::second_type&
LookupOrInsertNew(Collection* const collection,
                  const typename Collection::value_type::first_type& key,
                  const Arg& arg) {
  // iterator_traits<T*>::value_type recovers the pointee type T.
  typedef typename std::iterator_traits<
      typename Collection::value_type::second_type>::value_type Element;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(
          key,
          static_cast<typename Collection::value_type::second_type>(NULL)));
  if (result.second) {
    result.first->second = new Element(arg);
  }
  return result.first->second;
}
|
475 |
+
|
476 |
+
// Lookup of linked/shared pointers comes in two flavors:
//
// LookupOrInsertNewLinkedPtr: use when the container owns its elements for
// their whole lifetime. Working with the returned raw pointer is fine as
// long as no other thread may delete or replace the accessed element; one
// mutex must then guard the container operation together with any use of
// the returned element. Lookups may use FindLinkedPtr*().
//
// LookupOrInsertNewSharedPtr: use when elements may outlive the container,
// e.g. when a reader allows parallel updates. A mutex then only needs to
// guard container operations, while every element access goes through the
// shared pointer. Lookups must use FindPtr*(); FindLinkedPtr*() compiles in
// this scenario but is incorrect.

// Returns the raw pointer held by collection[key], first resetting it to a
// freshly allocated Value::element_type when the key was absent.
// Value::element_type must be default constructable.
template <class Collection>
typename Collection::value_type::second_type::element_type*
LookupOrInsertNewLinkedPtr(
    Collection* const collection,
    const typename Collection::value_type::first_type& key) {
  typedef typename Collection::value_type::second_type Ptr;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, Ptr()));
  if (result.second) {
    result.first->second.reset(new typename Ptr::element_type);
  }
  return result.first->second.get();
}
|
508 |
+
|
509 |
+
// Like LookupOrInsertNewLinkedPtr() above, but a newly created element is
// built with the single-parameter constructor taking `arg`. Note that `arg`
// is evaluated even when no insertion happens, so keep it cheap to compute;
// the element construction itself only occurs when actually needed.
template <class Collection, class Arg>
typename Collection::value_type::second_type::element_type*
LookupOrInsertNewLinkedPtr(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const Arg& arg) {
  typedef typename Collection::value_type::second_type Ptr;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, Ptr()));
  if (result.second) {
    result.first->second.reset(new typename Ptr::element_type(arg));
  }
  return result.first->second.get();
}
|
528 |
+
|
529 |
+
// Returns a reference to the shared_ptr stored under `key`, first pointing
// it at a freshly allocated default-constructed element when the key was
// absent. Unlike LookupOrInsertNewLinkedPtr this hands back the shared_ptr
// itself rather than a raw pointer. Value::element_type must be default
// constructable.
template <class Collection>
typename Collection::value_type::second_type&
LookupOrInsertNewSharedPtr(
    Collection* const collection,
    const typename Collection::value_type::first_type& key) {
  typedef typename Collection::value_type::second_type SharedPtr;
  typedef typename SharedPtr::element_type Element;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, SharedPtr()));
  if (result.second) {
    result.first->second.reset(new Element());
  }
  return result.first->second;
}
|
547 |
+
|
548 |
+
// Like LookupOrInsertNewSharedPtr() above, but a newly created element is
// built with the single-parameter constructor taking `arg`. Note that `arg`
// is evaluated even when no insertion happens, so keep it cheap to compute;
// the element construction itself only occurs when actually needed.
template <class Collection, class Arg>
typename Collection::value_type::second_type&
LookupOrInsertNewSharedPtr(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const Arg& arg) {
  typedef typename Collection::value_type::second_type SharedPtr;
  typedef typename SharedPtr::element_type Element;
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, SharedPtr()));
  if (result.second) {
    result.first->second.reset(new Element(arg));
  }
  return result.first->second;
}
|
568 |
+
|
569 |
+
//
|
570 |
+
// Misc Utility Functions
|
571 |
+
//
|
572 |
+
|
573 |
+
// Overwrites the value stored under `key`, first saving the old value into
// `*previous` (when `previous` is non-NULL). When the key was absent the
// pair is inserted, `*previous` is left untouched, and false is returned;
// true means an existing value was replaced.
//
// InsertOrReturnExisting is the complementary operation: it returns the
// address of an already existing value instead of updating it.
template <class Collection>
bool UpdateReturnCopy(Collection* const collection,
                      const typename Collection::value_type::first_type& key,
                      const typename Collection::value_type::second_type& value,
                      typename Collection::value_type::second_type* previous) {
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(typename Collection::value_type(key, value));
  if (result.second) return false;  // Fresh insert; nothing to report.
  if (previous != NULL) {
    *previous = result.first->second;
  }
  result.first->second = value;
  return true;
}
|
597 |
+
|
598 |
+
// Overload of UpdateReturnCopy() taking the key and value as one pair.
template <class Collection>
bool UpdateReturnCopy(Collection* const collection,
                      const typename Collection::value_type& vt,
                      typename Collection::value_type::second_type* previous) {
  std::pair<typename Collection::iterator, bool> result =
      collection->insert(vt);
  if (result.second) return false;  // Fresh insert; nothing to report.
  if (previous != NULL) {
    *previous = result.first->second;
  }
  result.first->second = vt.second;
  return true;
}
|
614 |
+
|
615 |
+
// Tries to insert `vt`. Returns NULL when the insert succeeded; otherwise
// returns a pointer to the value already stored under vt.first, which is
// left unchanged.
//
// This complements UpdateReturnCopy: it lets the caller inspect the old
// value before deciding to update, with a single lookup, and has no
// undefined-"previous" issue when new data was inserted.
//
// NOTE: the previous declaration returned `second_type* const`; a top-level
// const on a by-value return is meaningless and triggers
// -Wignored-qualifiers, so it is dropped here (interface-identical).
template <class Collection>
typename Collection::value_type::second_type*
InsertOrReturnExisting(Collection* const collection,
                       const typename Collection::value_type& vt) {
  std::pair<typename Collection::iterator, bool> ret = collection->insert(vt);
  if (ret.second) {
    return NULL;  // Inserted; there was no previous value.
  }
  return &ret.first->second;  // Address of the pre-existing value.
}
|
633 |
+
|
634 |
+
// Same as InsertOrReturnExisting above, but with explicit key and data.
// The meaningless top-level const on the return type is dropped (see the
// pair overload), and the insert is performed directly rather than via an
// extra delegation layer.
template <class Collection>
typename Collection::value_type::second_type*
InsertOrReturnExisting(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& data) {
  std::pair<typename Collection::iterator, bool> ret =
      collection->insert(typename Collection::value_type(key, data));
  return ret.second ? NULL : &ret.first->second;
}
|
644 |
+
|
645 |
+
// Removes the entry for `key` from a map whose mapped_type is a pointer and
// returns that pointer (ownership passes to the caller); returns NULL when
// the key is absent.
//
// Examples:
//   map<string, MyType*> my_map;
//
//   // One-line cleanup:
//   delete EraseKeyReturnValuePtr(&my_map, "abc");
//
//   // Keep the value alive:
//   std::unique_ptr<MyType> value_ptr(
//       EraseKeyReturnValuePtr(&my_map, "abc"));
//   if (value_ptr.get())
//     value_ptr->DoSomething();
template <class Collection>
typename Collection::value_type::second_type EraseKeyReturnValuePtr(
    Collection* const collection,
    const typename Collection::value_type::first_type& key) {
  typename Collection::iterator found = collection->find(key);
  if (found == collection->end()) {
    return NULL;
  }
  // Copy the pointer out before erasing the node that holds it.
  typename Collection::value_type::second_type value = found->second;
  collection->erase(found);
  return value;
}
|
674 |
+
|
675 |
+
// Inserts all the keys from map_container into key_container, which must
|
676 |
+
// support insert(MapContainer::key_type).
|
677 |
+
//
|
678 |
+
// Note: any initial contents of the key_container are not cleared.
|
679 |
+
template <class MapContainer, class KeyContainer>
|
680 |
+
void InsertKeysFromMap(const MapContainer& map_container,
|
681 |
+
KeyContainer* key_container) {
|
682 |
+
GOOGLE_CHECK(key_container != NULL);
|
683 |
+
for (typename MapContainer::const_iterator it = map_container.begin();
|
684 |
+
it != map_container.end(); ++it) {
|
685 |
+
key_container->insert(it->first);
|
686 |
+
}
|
687 |
+
}
|
688 |
+
|
689 |
+
// Appends all the keys from map_container into key_container, which must
|
690 |
+
// support push_back(MapContainer::key_type).
|
691 |
+
//
|
692 |
+
// Note: any initial contents of the key_container are not cleared.
|
693 |
+
template <class MapContainer, class KeyContainer>
|
694 |
+
void AppendKeysFromMap(const MapContainer& map_container,
|
695 |
+
KeyContainer* key_container) {
|
696 |
+
GOOGLE_CHECK(key_container != NULL);
|
697 |
+
for (typename MapContainer::const_iterator it = map_container.begin();
|
698 |
+
it != map_container.end(); ++it) {
|
699 |
+
key_container->push_back(it->first);
|
700 |
+
}
|
701 |
+
}
|
702 |
+
|
703 |
+
// A more specialized overload of AppendKeysFromMap to optimize reallocations
|
704 |
+
// for the common case in which we're appending keys to a vector and hence can
|
705 |
+
// (and sometimes should) call reserve() first.
|
706 |
+
//
|
707 |
+
// (It would be possible to play SFINAE games to call reserve() for any
|
708 |
+
// container that supports it, but this seems to get us 99% of what we need
|
709 |
+
// without the complexity of a SFINAE-based solution.)
|
710 |
+
template <class MapContainer, class KeyType>
|
711 |
+
void AppendKeysFromMap(const MapContainer& map_container,
|
712 |
+
std::vector<KeyType>* key_container) {
|
713 |
+
GOOGLE_CHECK(key_container != NULL);
|
714 |
+
// We now have the opportunity to call reserve(). Calling reserve() every
|
715 |
+
// time is a bad idea for some use cases: libstdc++'s implementation of
|
716 |
+
// vector<>::reserve() resizes the vector's backing store to exactly the
|
717 |
+
// given size (unless it's already at least that big). Because of this,
|
718 |
+
// the use case that involves appending a lot of small maps (total size
|
719 |
+
// N) one by one to a vector would be O(N^2). But never calling reserve()
|
720 |
+
// loses the opportunity to improve the use case of adding from a large
|
721 |
+
// map to an empty vector (this improves performance by up to 33%). A
|
722 |
+
// number of heuristics are possible; see the discussion in
|
723 |
+
// cl/34081696. Here we use the simplest one.
|
724 |
+
if (key_container->empty()) {
|
725 |
+
key_container->reserve(map_container.size());
|
726 |
+
}
|
727 |
+
for (typename MapContainer::const_iterator it = map_container.begin();
|
728 |
+
it != map_container.end(); ++it) {
|
729 |
+
key_container->push_back(it->first);
|
730 |
+
}
|
731 |
+
}
|
732 |
+
|
733 |
+
// Inserts all the values from map_container into value_container, which must
|
734 |
+
// support push_back(MapContainer::mapped_type).
|
735 |
+
//
|
736 |
+
// Note: any initial contents of the value_container are not cleared.
|
737 |
+
template <class MapContainer, class ValueContainer>
|
738 |
+
void AppendValuesFromMap(const MapContainer& map_container,
|
739 |
+
ValueContainer* value_container) {
|
740 |
+
GOOGLE_CHECK(value_container != NULL);
|
741 |
+
for (typename MapContainer::const_iterator it = map_container.begin();
|
742 |
+
it != map_container.end(); ++it) {
|
743 |
+
value_container->push_back(it->second);
|
744 |
+
}
|
745 |
+
}
|
746 |
+
|
747 |
+
// A more specialized overload of AppendValuesFromMap to optimize reallocations
|
748 |
+
// for the common case in which we're appending values to a vector and hence
|
749 |
+
// can (and sometimes should) call reserve() first.
|
750 |
+
//
|
751 |
+
// (It would be possible to play SFINAE games to call reserve() for any
|
752 |
+
// container that supports it, but this seems to get us 99% of what we need
|
753 |
+
// without the complexity of a SFINAE-based solution.)
|
754 |
+
template <class MapContainer, class ValueType>
|
755 |
+
void AppendValuesFromMap(const MapContainer& map_container,
|
756 |
+
std::vector<ValueType>* value_container) {
|
757 |
+
GOOGLE_CHECK(value_container != NULL);
|
758 |
+
// See AppendKeysFromMap for why this is done.
|
759 |
+
if (value_container->empty()) {
|
760 |
+
value_container->reserve(map_container.size());
|
761 |
+
}
|
762 |
+
for (typename MapContainer::const_iterator it = map_container.begin();
|
763 |
+
it != map_container.end(); ++it) {
|
764 |
+
value_container->push_back(it->second);
|
765 |
+
}
|
766 |
+
}
|
767 |
+
|
768 |
+
} // namespace protobuf
|
769 |
+
} // namespace google
|
770 |
+
|
771 |
+
#endif // GOOGLE_PROTOBUF_STUBS_MAP_UTIL_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/mutex.h
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Copyright (c) 2006, Google Inc.
|
2 |
+
// All rights reserved.
|
3 |
+
//
|
4 |
+
// Redistribution and use in source and binary forms, with or without
|
5 |
+
// modification, are permitted provided that the following conditions are
|
6 |
+
// met:
|
7 |
+
//
|
8 |
+
// * Redistributions of source code must retain the above copyright
|
9 |
+
// notice, this list of conditions and the following disclaimer.
|
10 |
+
// * Redistributions in binary form must reproduce the above
|
11 |
+
// copyright notice, this list of conditions and the following disclaimer
|
12 |
+
// in the documentation and/or other materials provided with the
|
13 |
+
// distribution.
|
14 |
+
// * Neither the name of Google Inc. nor the names of its
|
15 |
+
// contributors may be used to endorse or promote products derived from
|
16 |
+
// this software without specific prior written permission.
|
17 |
+
//
|
18 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
19 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
20 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
21 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
22 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
23 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
24 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
25 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
26 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
27 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
28 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
29 |
+
|
30 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_MUTEX_H_
|
31 |
+
#define GOOGLE_PROTOBUF_STUBS_MUTEX_H_
|
32 |
+
|
33 |
+
#include <mutex>
|
34 |
+
|
35 |
+
#include <google/protobuf/stubs/macros.h>
|
36 |
+
|
37 |
+
// ===================================================================
|
38 |
+
// emulates google3/base/mutex.h
|
39 |
+
namespace google {
|
40 |
+
namespace protobuf {
|
41 |
+
namespace internal {
|
42 |
+
|
43 |
+
#define GOOGLE_PROTOBUF_LINKER_INITIALIZED
|
44 |
+
|
45 |
+
// Mutex is a natural type to wrap. As both google and other organization have
|
46 |
+
// specialized mutexes. gRPC also provides an injection mechanism for custom
|
47 |
+
// mutexes.
|
48 |
+
class LIBPROTOBUF_EXPORT WrappedMutex {
|
49 |
+
public:
|
50 |
+
WrappedMutex() = default;
|
51 |
+
void Lock() { mu_.lock(); }
|
52 |
+
void Unlock() { mu_.unlock(); }
|
53 |
+
// Crash if this Mutex is not held exclusively by this thread.
|
54 |
+
// May fail to crash when it should; will never crash when it should not.
|
55 |
+
void AssertHeld() const {}
|
56 |
+
|
57 |
+
private:
|
58 |
+
std::mutex mu_;
|
59 |
+
};
|
60 |
+
|
61 |
+
using Mutex = WrappedMutex;
|
62 |
+
|
63 |
+
// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
|
64 |
+
class LIBPROTOBUF_EXPORT MutexLock {
|
65 |
+
public:
|
66 |
+
explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); }
|
67 |
+
~MutexLock() { this->mu_->Unlock(); }
|
68 |
+
private:
|
69 |
+
Mutex *const mu_;
|
70 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLock);
|
71 |
+
};
|
72 |
+
|
73 |
+
// TODO(kenton): Implement these? Hard to implement portably.
|
74 |
+
typedef MutexLock ReaderMutexLock;
|
75 |
+
typedef MutexLock WriterMutexLock;
|
76 |
+
|
77 |
+
// MutexLockMaybe is like MutexLock, but is a no-op when mu is NULL.
|
78 |
+
class LIBPROTOBUF_EXPORT MutexLockMaybe {
|
79 |
+
public:
|
80 |
+
explicit MutexLockMaybe(Mutex *mu) :
|
81 |
+
mu_(mu) { if (this->mu_ != NULL) { this->mu_->Lock(); } }
|
82 |
+
~MutexLockMaybe() { if (this->mu_ != NULL) { this->mu_->Unlock(); } }
|
83 |
+
private:
|
84 |
+
Mutex *const mu_;
|
85 |
+
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLockMaybe);
|
86 |
+
};
|
87 |
+
|
88 |
+
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
// Fallback for toolchains without thread_local support: lazily creates one
// T per thread via a pthread TLS key and destroys it at thread exit.
template<typename T>
class ThreadLocalStorage {
 public:
  ThreadLocalStorage() {
    pthread_key_create(&key_, &ThreadLocalStorage::Delete);
  }
  ~ThreadLocalStorage() {
    pthread_key_delete(key_);
  }
  // Returns the calling thread's instance, creating it on first access.
  T* Get() {
    T* value = static_cast<T*>(pthread_getspecific(key_));
    if (value == NULL) {
      value = new T();
      pthread_setspecific(key_, value);
    }
    return value;
  }

 private:
  // Registered with pthread_key_create(); runs at thread exit to destroy
  // the per-thread instance.
  static void Delete(void* value) {
    delete static_cast<T*>(value);
  }
  pthread_key_t key_;

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadLocalStorage);
};
#endif
|
115 |
+
|
116 |
+
} // namespace internal
|
117 |
+
|
118 |
+
// We made these internal so that they would show up as such in the docs,
|
119 |
+
// but we don't want to stick "internal::" in front of them everywhere.
|
120 |
+
using internal::Mutex;
|
121 |
+
using internal::MutexLock;
|
122 |
+
using internal::ReaderMutexLock;
|
123 |
+
using internal::WriterMutexLock;
|
124 |
+
using internal::MutexLockMaybe;
|
125 |
+
|
126 |
+
|
127 |
+
} // namespace protobuf
|
128 |
+
} // namespace google
|
129 |
+
|
130 |
+
#endif // GOOGLE_PROTOBUF_STUBS_MUTEX_H_
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h
ADDED
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
//
|
33 |
+
// emulates google3/base/once.h
|
34 |
+
//
|
35 |
+
// This header is intended to be included only by internal .cc files and
|
36 |
+
// generated .pb.cc files. Users should not use this directly.
|
37 |
+
//
|
38 |
+
// This is basically a portable version of pthread_once().
|
39 |
+
//
|
40 |
+
// This header declares:
|
41 |
+
// * A type called ProtobufOnceType.
|
42 |
+
// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type
|
43 |
+
// ProtobufOnceType. This is the only legal way to declare such a variable.
|
44 |
+
// The macro may only be used at the global scope (you cannot create local or
|
45 |
+
// class member variables of this type).
|
46 |
+
// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()).
|
47 |
+
// This function, when invoked multiple times given the same ProtobufOnceType
|
48 |
+
// object, will invoke init_func on the first call only, and will make sure
|
49 |
+
// none of the calls return before that first call to init_func has finished.
|
50 |
+
// * The user can provide a parameter which GoogleOnceInit() forwards to the
|
51 |
+
// user-provided function when it is called. Usage example:
|
52 |
+
// int a = 10;
|
53 |
+
// GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a);
|
54 |
+
// * This implementation guarantees that ProtobufOnceType is a POD (i.e. no
|
55 |
+
// static initializer generated).
|
56 |
+
//
|
57 |
+
// This implements a way to perform lazy initialization. It's more efficient
|
58 |
+
// than using mutexes as no lock is needed if initialization has already
|
59 |
+
// happened.
|
60 |
+
//
|
61 |
+
// Example usage:
|
62 |
+
// void Init();
|
63 |
+
// GOOGLE_PROTOBUF_DECLARE_ONCE(once_init);
|
64 |
+
//
|
65 |
+
// // Calls Init() exactly once.
|
66 |
+
// void InitOnce() {
|
67 |
+
// GoogleOnceInit(&once_init, &Init);
|
68 |
+
// }
|
69 |
+
//
|
70 |
+
// Note that if GoogleOnceInit() is called before main() has begun, it must
|
71 |
+
// only be called by the thread that will eventually call main() -- that is,
|
72 |
+
// the thread that performs dynamic initialization. In general this is a safe
|
73 |
+
// assumption since people don't usually construct threads before main() starts,
|
74 |
+
// but it is technically not guaranteed. Unfortunately, Win32 provides no way
|
75 |
+
// whatsoever to statically-initialize its synchronization primitives, so our
|
76 |
+
// only choice is to assume that dynamic initialization is single-threaded.
|
77 |
+
|
78 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__
|
79 |
+
#define GOOGLE_PROTOBUF_STUBS_ONCE_H__
|
80 |
+
|
81 |
+
#include <atomic>
#include <mutex>
#include <thread>
#include <utility>
|
84 |
+
|
85 |
+
namespace google {
|
86 |
+
namespace protobuf {
|
87 |
+
namespace internal {
|
88 |
+
|
89 |
+
// ProtobufOnceType must remain a POD (no static initializer generated),
// so the flag is a plain atomic int rather than std::once_flag, whose
// constructor is non-trivial.
using once_flag = std::atomic<int>;

// Portable call_once built on an atomic three-state flag:
//   ONCE_INIT -> ONCE_RUNNING -> ONCE_DONE.
// The first caller to advance the flag from ONCE_INIT runs fn(args...);
// every other caller waits until the flag reaches ONCE_DONE, so no call
// returns before the initializer has finished.
//
// NOTE(review): if fn throws, the flag is left at ONCE_RUNNING and all
// later callers spin forever — unlike std::call_once, which re-arms the
// flag on exception. Confirm init functions cannot throw.
template <typename Callable, typename... Args>
void my_call_once(once_flag& once, Callable&& fn, Args&&... args) {
  enum CallOnceState {
    ONCE_INIT = 0,
    ONCE_RUNNING = 1,
    ONCE_DONE = 2,
  };

  int expected_state = ONCE_INIT;
  if (once.compare_exchange_strong(expected_state, ONCE_RUNNING)) {
    // This thread won the race: run the initializer, then publish DONE.
    fn(std::forward<Args>(args)...);
    once.store(ONCE_DONE);
    return;
  }

  if (expected_state == ONCE_DONE) {
    return;  // Initialization already finished; fast path.
  }

  // Another thread is running the initializer. Yield while spinning so
  // the waiter does not starve the initializing thread on a loaded CPU
  // (the original body was an empty busy-wait loop).
  while (once.load() == ONCE_RUNNING) {
    std::this_thread::yield();
  }
}
|
113 |
+
|
114 |
+
// Variadic stand-in for std::call_once; forwards all arguments to
// my_call_once() so the flag type can remain a POD atomic.
template <typename... Args>
void call_once(Args&&... args) {
  my_call_once(std::forward<Args>(args)...);
}
|
118 |
+
} // namespace internal
|
119 |
+
|
120 |
+
// TODO(gerbens) remove this once third_party is fully extracted
|
121 |
+
using ProtobufOnceType = internal::once_flag;
|
122 |
+
|
123 |
+
// Invokes init_func exactly once per |once| flag across all threads;
// callers that lose the race do not return until the winning call to
// init_func has finished.
inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
  internal::my_call_once(*once, init_func);
}
|
126 |
+
|
127 |
+
// Like GoogleOnceInit(), but forwards a single pointer argument to the
// user-provided function: init_func(arg) runs exactly once per |once|.
template <typename Arg>
inline void GoogleOnceInitArg(ProtobufOnceType* once, void (*init_func)(Arg*),
                              Arg* arg) {
  internal::my_call_once(*once, init_func, arg);
}
|
132 |
+
|
133 |
+
class GoogleOnceDynamic {
|
134 |
+
public:
|
135 |
+
// If this->Init() has not been called before by any thread,
|
136 |
+
// execute (*func_with_arg)(arg) then return.
|
137 |
+
// Otherwise, wait until that prior invocation has finished
|
138 |
+
// executing its function, then return.
|
139 |
+
template <typename T>
|
140 |
+
void Init(void (*func_with_arg)(T*), T* arg) {
|
141 |
+
GoogleOnceInitArg<T>(&this->state_, func_with_arg, arg);
|
142 |
+
}
|
143 |
+
|
144 |
+
private:
|
145 |
+
ProtobufOnceType state_;
|
146 |
+
};
|
147 |
+
|
148 |
+
// Convenience macros used by generated .pb.cc code to declare once flags
// at global scope (the only legal scope for ProtobufOnceType variables).
#define GOOGLE_PROTOBUF_ONCE_TYPE ::google::protobuf::ProtobufOnceType
#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \
  ::google::protobuf::ProtobufOnceType NAME
|
151 |
+
|
152 |
+
} // namespace protobuf
|
153 |
+
} // namespace google
|
154 |
+
|
155 |
+
#endif // GOOGLE_PROTOBUF_STUBS_ONCE_H__
|
cc-multilingual-main/cc_net/third_party/sentencepiece/third_party/protobuf-lite/google/protobuf/stubs/once.h.org
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// Protocol Buffers - Google's data interchange format
|
2 |
+
// Copyright 2008 Google Inc. All rights reserved.
|
3 |
+
// https://developers.google.com/protocol-buffers/
|
4 |
+
//
|
5 |
+
// Redistribution and use in source and binary forms, with or without
|
6 |
+
// modification, are permitted provided that the following conditions are
|
7 |
+
// met:
|
8 |
+
//
|
9 |
+
// * Redistributions of source code must retain the above copyright
|
10 |
+
// notice, this list of conditions and the following disclaimer.
|
11 |
+
// * Redistributions in binary form must reproduce the above
|
12 |
+
// copyright notice, this list of conditions and the following disclaimer
|
13 |
+
// in the documentation and/or other materials provided with the
|
14 |
+
// distribution.
|
15 |
+
// * Neither the name of Google Inc. nor the names of its
|
16 |
+
// contributors may be used to endorse or promote products derived from
|
17 |
+
// this software without specific prior written permission.
|
18 |
+
//
|
19 |
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25 |
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26 |
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27 |
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28 |
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
+
|
31 |
+
// Author: [email protected] (Kenton Varda)
|
32 |
+
//
|
33 |
+
// emulates google3/base/once.h
|
34 |
+
//
|
35 |
+
// This header is intended to be included only by internal .cc files and
|
36 |
+
// generated .pb.cc files. Users should not use this directly.
|
37 |
+
//
|
38 |
+
// This is basically a portable version of pthread_once().
|
39 |
+
//
|
40 |
+
// This header declares:
|
41 |
+
// * A type called ProtobufOnceType.
|
42 |
+
// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type
|
43 |
+
// ProtobufOnceType. This is the only legal way to declare such a variable.
|
44 |
+
// The macro may only be used at the global scope (you cannot create local or
|
45 |
+
// class member variables of this type).
|
46 |
+
// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()).
|
47 |
+
// This function, when invoked multiple times given the same ProtobufOnceType
|
48 |
+
// object, will invoke init_func on the first call only, and will make sure
|
49 |
+
// none of the calls return before that first call to init_func has finished.
|
50 |
+
// * The user can provide a parameter which GoogleOnceInit() forwards to the
|
51 |
+
// user-provided function when it is called. Usage example:
|
52 |
+
// int a = 10;
|
53 |
+
// GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a);
|
54 |
+
// * This implementation guarantees that ProtobufOnceType is a POD (i.e. no
|
55 |
+
// static initializer generated).
|
56 |
+
//
|
57 |
+
// This implements a way to perform lazy initialization. It's more efficient
|
58 |
+
// than using mutexes as no lock is needed if initialization has already
|
59 |
+
// happened.
|
60 |
+
//
|
61 |
+
// Example usage:
|
62 |
+
// void Init();
|
63 |
+
// GOOGLE_PROTOBUF_DECLARE_ONCE(once_init);
|
64 |
+
//
|
65 |
+
// // Calls Init() exactly once.
|
66 |
+
// void InitOnce() {
|
67 |
+
// GoogleOnceInit(&once_init, &Init);
|
68 |
+
// }
|
69 |
+
//
|
70 |
+
// Note that if GoogleOnceInit() is called before main() has begun, it must
|
71 |
+
// only be called by the thread that will eventually call main() -- that is,
|
72 |
+
// the thread that performs dynamic initialization. In general this is a safe
|
73 |
+
// assumption since people don't usually construct threads before main() starts,
|
74 |
+
// but it is technically not guaranteed. Unfortunately, Win32 provides no way
|
75 |
+
// whatsoever to statically-initialize its synchronization primitives, so our
|
76 |
+
// only choice is to assume that dynamic initialization is single-threaded.
|
77 |
+
|
78 |
+
#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__
|
79 |
+
#define GOOGLE_PROTOBUF_STUBS_ONCE_H__
|
80 |
+
|
81 |
+
#include <mutex>
|
82 |
+
#include <utility>
|
83 |
+
|
84 |
+
namespace google {
|
85 |
+
namespace protobuf {
|
86 |
+
namespace internal {
|
87 |
+
|
88 |
+
// In this original version of the header, once_flag and call_once simply
// alias the C++11 standard facilities.
using once_flag = std::once_flag;
// Forwards all arguments to std::call_once().
template <typename... Args>
void call_once(Args&&... args ) {
  std::call_once(std::forward<Args>(args)...);
}
|
93 |
+
|
94 |
+
} // namespace internal
|
95 |
+
|
96 |
+
// TODO(gerbens) remove this once third_party is fully extracted
|
97 |
+
using ProtobufOnceType = internal::once_flag;
|
98 |
+
|
99 |
+
// Runs init_func exactly once per |once| flag (thin wrapper over
// std::call_once); losers of the race wait for the winner to finish.
inline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {
  std::call_once(*once, init_func);
}
|
102 |
+
|
103 |
+
// Like GoogleOnceInit(), but forwards a single pointer argument to the
// user-provided function via std::call_once.
template <typename Arg>
inline void GoogleOnceInitArg(ProtobufOnceType* once, void (*init_func)(Arg*),
                              Arg* arg) {
  std::call_once(*once, init_func, arg);
}
|
108 |
+
|
109 |
+
// Holds a once flag together with its Init() entry point, for cases where
// the once state lives inside another object rather than at global scope.
class GoogleOnceDynamic {
 public:
  // If this->Init() has not been called before by any thread,
  // execute (*func_with_arg)(arg) then return.
  // Otherwise, wait until that prior invocation has finished
  // executing its function, then return.
  template<typename T>
  void Init(void (*func_with_arg)(T*), T* arg) {
    GoogleOnceInitArg<T>(&this->state_, func_with_arg, arg);
  }
 private:
  // Underlying once flag (std::once_flag in this version of the header).
  ProtobufOnceType state_;
};
|
122 |
+
|
123 |
+
// Convenience macros used by generated .pb.cc code to declare once flags
// at global scope (the only legal scope for ProtobufOnceType variables).
#define GOOGLE_PROTOBUF_ONCE_TYPE ::google::protobuf::ProtobufOnceType
#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \
  ::google::protobuf::ProtobufOnceType NAME
|
126 |
+
|
127 |
+
} // namespace protobuf
|
128 |
+
} // namespace google
|
129 |
+
|
130 |
+
#endif // GOOGLE_PROTOBUF_STUBS_ONCE_H__
|